/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)

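/*
 * Each vbuffer is carved from a single slab object: the struct itself is
 * immediately followed by MAX_INLINE_CMD_SIZE bytes for the command and
 * MAX_INLINE_RESP_SIZE bytes for small responses, so the common case needs
 * no extra allocations.  Larger responses use a caller-supplied resp_buf.
 */
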
static void convert_to_hw_box(struct virtio_gpu_box *dst,
                              const struct drm_virtgpu_3d_box *src)
{
        dst->x = cpu_to_le32(src->x);
        dst->y = cpu_to_le32(src->y);
        dst->z = cpu_to_le32(src->z);
        dst->w = cpu_to_le32(src->w);
        dst->h = cpu_to_le32(src->h);
        dst->d = cpu_to_le32(src->d);
}

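/*
 * Virtqueue interrupt callbacks for the control and cursor queues: they
 * only schedule the dequeue work, so buffer reclaim runs in process
 * context rather than in the interrupt handler.
 */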
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                         VBUFFER_SIZE,
                                         __alignof__(struct virtio_gpu_vbuffer),
                                         0, NULL);
        if (!vgdev->vbufs)
                return -ENOMEM;
        return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        kmem_cache_destroy(vgdev->vbufs);
        vgdev->vbufs = NULL;
}

static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
        if (!vbuf)
                return ERR_PTR(-ENOMEM);

        BUG_ON(size > MAX_INLINE_CMD_SIZE);
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, size,
                                   sizeof(struct virtio_gpu_ctrl_hdr),
                                   NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kvfree(vbuf->data_buf);
        kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}

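/*
 * Control-queue work handler: drain used buffers under the queue lock,
 * re-checking after callbacks are re-enabled to close the race with a
 * concurrent interrupt, then run response callbacks and free the vbuffers.
 */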
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id = 0;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry(entry, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

                trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
                        if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
                                struct virtio_gpu_ctrl_hdr *cmd;
                                cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
                                DRM_ERROR("response 0x%x (command 0x%x)\n",
                                          le32_to_cpu(resp->type),
                                          le32_to_cpu(cmd->type));
                        } else
                                DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                }
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);

                        if (fence_id > f) {
                                DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
                                          __func__, fence_id, f);
                        } else {
                                fence_id = f;
                        }
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                if (entry->objs)
                        virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
        int ret, s, i;
        struct sg_table *sgt;
        struct scatterlist *sg;
        struct page *pg;

        if (WARN_ON(!PAGE_ALIGNED(data)))
                return NULL;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
        ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
        if (ret) {
                kfree(sgt);
                return NULL;
        }

        for_each_sg(sgt->sgl, sg, *sg_ents, i) {
                pg = vmalloc_to_page(data);
                if (!pg) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        return NULL;
                }

                s = min_t(int, PAGE_SIZE, size);
                sg_set_page(sg, pg, s, 0);

                size -= s;
                data += s;
        }

        return sgt;
}

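/*
 * Queues a single command; expects ctrlq.qlock to be held and may drop and
 * re-take it while waiting for virtqueue space (hence the __releases and
 * __acquires annotations).  Returns true when the caller must notify the
 * host.
 */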
static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
                                                struct virtio_gpu_vbuffer *vbuf,
                                                struct scatterlist *vout)
                __releases(&vgdev->ctrlq.qlock)
                __acquires(&vgdev->ctrlq.qlock)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        struct scatterlist *sgs[3], vcmd, vresp;
        int outcnt = 0, incnt = 0;
        bool notify = false;
        int ret;

        if (!vgdev->vqs_ready)
                return notify;

        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        sgs[outcnt + incnt] = &vcmd;
        outcnt++;

        if (vout) {
                sgs[outcnt + incnt] = vout;
                outcnt++;
        }

        /* set up vresp */
        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
                spin_lock(&vgdev->ctrlq.qlock);
                goto retry;
        } else {
                trace_virtio_gpu_cmd_queue(vq,
                        (struct virtio_gpu_ctrl_hdr *)vbuf->buf);

                notify = virtqueue_kick_prepare(vq);
        }
        return notify;
}

static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                                struct virtio_gpu_vbuffer *vbuf,
                                                struct virtio_gpu_ctrl_hdr *hdr,
                                                struct virtio_gpu_fence *fence)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        struct scatterlist *vout = NULL, sg;
        struct sg_table *sgt = NULL;
        bool notify;
        int outcnt = 0;

        if (vbuf->data_size) {
                if (is_vmalloc_addr(vbuf->data_buf)) {
                        sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
                                             &outcnt);
                        if (!sgt)
                                return;
                        vout = sgt->sgl;
                } else {
                        sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
                        vout = &sg;
                        outcnt = 1;
                }
        }

again:
        spin_lock(&vgdev->ctrlq.qlock);

        /*
         * Make sure we have enough space in the virtqueue. If not,
         * wait here until we do.
         *
         * Without that virtio_gpu_queue_ctrl_buffer_locked might have
         * to wait for free space, which can result in fence ids being
         * submitted out-of-order.
         */
        if (vq->num_free < 2 + outcnt) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
                goto again;
        }

        if (hdr && fence) {
                virtio_gpu_fence_emit(vgdev, hdr, fence);
                if (vbuf->objs) {
                        virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
                        virtio_gpu_array_unlock_resv(vbuf->objs);
                }
        }
        notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
        spin_unlock(&vgdev->ctrlq.qlock);
        if (notify) {
                if (vgdev->disable_notify)
                        vgdev->pending_notify = true;
                else
                        virtqueue_notify(vgdev->ctrlq.vq);
        }

        if (sgt) {
                sg_free_table(sgt);
                kfree(sgt);
        }
}

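/*
 * Notify batching: while notifications are disabled, kicks are only
 * recorded in pending_notify; virtio_gpu_enable_notify() issues the
 * deferred virtqueue_notify().
 */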
void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
{
        vgdev->disable_notify = true;
}

void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
{
        vgdev->disable_notify = false;

        if (!vgdev->pending_notify)
                return;
        vgdev->pending_notify = false;
        virtqueue_notify(vgdev->ctrlq.vq);
}

static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                         struct virtio_gpu_vbuffer *vbuf)
{
        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
}

static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        bool notify;
        int ret;
        int outcnt;

        if (!vgdev->vqs_ready)
                return;

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                trace_virtio_gpu_cmd_queue(vq,
                        (struct virtio_gpu_ctrl_hdr *)vbuf->buf);

                notify = virtqueue_kick_prepare(vq);
        }

        spin_unlock(&vgdev->cursorq.qlock);

        if (notify)
                virtqueue_notify(vq);
}

/* Just create gem objects for userspace and long-lived objects;
 * just use dma_alloc'ed pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo,
                                    struct virtio_gpu_object_params *params,
                                    struct virtio_gpu_object_array *objs,
                                    struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(params->format);
        cmd_p->width = cpu_to_le32(params->width);
        cmd_p->height = cpu_to_le32(params->height);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
        bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
                                                  uint32_t resource_id,
                                                  struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_detach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

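/*
 * When the DMA API is in use the backing pages are DMA-mapped, so transfer
 * commands must sync them for the device before the host reads them.
 */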
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint64_t offset,
                                        uint32_t width, uint32_t height,
                                        uint32_t x, uint32_t y,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

        if (use_dma_api)
                dma_sync_sg_for_device(vgdev->vdev->dev.parent,
                                       bo->pages->sgl, bo->pages->nents,
                                       DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

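/*
 * The ents array is handed over to the vbuffer as data_buf and is freed by
 * free_vbuf() once the host has consumed the command.
 */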
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
        vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
        vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        /* Copy must occur before is_valid is signalled. */
                        smp_wmb();
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up_all(&vgdev->resp_wq);
}

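/*
 * Block-read callback for drm_do_get_edid(), backed by the EDID response
 * buffer received from the host.
 */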
static int virtio_get_edid_block(void *data, u8 *buf,
                                 unsigned int block, size_t len)
{
        struct virtio_gpu_resp_edid *resp = data;
        size_t start = block * EDID_LENGTH;

        if (start + len > le32_to_cpu(resp->size))
                return -1;
        memcpy(buf, resp->edid + start, len);
        return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
                                       struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_cmd_get_edid *cmd =
                (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
        struct virtio_gpu_resp_edid *resp =
                (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
        uint32_t scanout = le32_to_cpu(cmd->scanout);
        struct virtio_gpu_output *output;
        struct edid *new_edid, *old_edid;

        if (scanout >= vgdev->num_scanouts)
                return;
        output = vgdev->outputs + scanout;

        new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
        drm_connector_update_edid_property(&output->conn, new_edid);

        spin_lock(&vgdev->display_info_lock);
        old_edid = output->edid;
        output->edid = new_edid;
        spin_unlock(&vgdev->display_info_lock);

        kfree(old_edid);
        wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

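/*
 * Allocates a fresh cache entry, then re-checks under display_info_lock
 * whether another task already added an identical entry; if so, the new
 * allocations are freed and the existing entry is returned instead.
 */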
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        struct virtio_gpu_drv_cap_cache *search_ent;
        void *resp_buf;

        *cache_p = NULL;

        if (idx >= vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        max_size = vgdev->capsets[idx].max_size;
        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        /* Search while under lock in case it was added by another task. */
        list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
                if (search_ent->id == vgdev->capsets[idx].id &&
                    search_ent->version == version) {
                        *cache_p = search_ent;
                        break;
                }
        }
        if (!*cache_p)
                list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        if (*cache_p) {
                /* Entry was found, so free everything that was just created. */
                kfree(resp_buf);
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return 0;
        }

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_cmd_get_edid *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;
        int scanout;

        if (WARN_ON(!vgdev->has_edid))
                return -EINVAL;

        for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
                resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
                                   GFP_KERNEL);
                if (!resp_buf)
                        return -ENOMEM;

                cmd_p = virtio_gpu_alloc_cmd_resp
                        (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
                         sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
                         resp_buf);
                cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
                cmd_p->scanout = cpu_to_le32(scanout);
                virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        }

        return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t nlen, const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_object *bo,
                                  struct virtio_gpu_object_params *params,
                                  struct virtio_gpu_object_array *objs,
                                  struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(params->format);
        cmd_p->width = cpu_to_le32(params->width);
        cmd_p->height = cpu_to_le32(params->height);

        cmd_p->target = cpu_to_le32(params->target);
        cmd_p->bind = cpu_to_le32(params->bind);
        cmd_p->depth = cpu_to_le32(params->depth);
        cmd_p->array_size = cpu_to_le32(params->array_size);
        cmd_p->last_level = cpu_to_le32(params->last_level);
        cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
        cmd_p->flags = cpu_to_le32(params->flags);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);

        bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct drm_virtgpu_3d_box *box,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

        if (use_dma_api)
                dma_sync_sg_for_device(vgdev->vdev->dev.parent,
                                       bo->pages->sgl, bo->pages->nents,
                                       DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct drm_virtgpu_3d_box *box,
                                          struct virtio_gpu_object_array *objs,
                                          struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id,
                           struct virtio_gpu_object_array *objs,
                           struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

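/*
 * Pins the shmem backing pages, builds the scatter list (DMA-mapped when
 * the DMA API is in use), and attaches it to the host resource.
 */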
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object *obj,
                             struct virtio_gpu_fence *fence)
{
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
        struct virtio_gpu_mem_entry *ents;
        struct scatterlist *sg;
        int si, nents, ret;

        if (WARN_ON_ONCE(!obj->created))
                return -EINVAL;
        if (WARN_ON_ONCE(obj->pages))
                return -EINVAL;

        ret = drm_gem_shmem_pin(&obj->base.base);
        if (ret < 0)
                return -EINVAL;

        obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
        if (obj->pages == NULL) {
                drm_gem_shmem_unpin(&obj->base.base);
                return -EINVAL;
        }

        if (use_dma_api) {
                obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
                                         obj->pages->sgl, obj->pages->nents,
                                         DMA_TO_DEVICE);
                nents = obj->mapped;
        } else {
                nents = obj->pages->nents;
        }

        /* gets freed when the ring has consumed it */
        ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
                             GFP_KERNEL);
        if (!ents) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }

        for_each_sg(obj->pages->sgl, sg, nents, si) {
                ents[si].addr = cpu_to_le64(use_dma_api
                                            ? sg_dma_address(sg)
                                            : sg_phys(sg));
                ents[si].length = cpu_to_le32(sg->length);
                ents[si].padding = 0;
        }

        virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
                                               ents, nents,
                                               fence);
        return 0;
}

void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *obj)
{
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

        if (WARN_ON_ONCE(!obj->pages))
                return;

        if (use_dma_api && obj->mapped) {
                struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
                /* detach backing and wait for the host to process it ... */
                virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
                dma_fence_wait(&fence->f, true);
                dma_fence_put(&fence->f);

                /* ... then tear down iommu mappings */
                dma_unmap_sg(vgdev->vdev->dev.parent,
                             obj->pages->sgl, obj->mapped,
                             DMA_TO_DEVICE);
                obj->mapped = 0;
        } else {
                virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
        }

        sg_free_table(obj->pages);
        obj->pages = NULL;

        drm_gem_shmem_unpin(&obj->base.base);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}