/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)
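
/*
 * Each vbuffer is one contiguous chunk: the virtio_gpu_vbuffer bookkeeping
 * struct, followed by MAX_INLINE_CMD_SIZE bytes for the command and
 * MAX_INLINE_RESP_SIZE bytes for the response. Rough layout:
 *
 *   [ struct virtio_gpu_vbuffer | inline cmd (96B) | inline resp (24B) ]
 *
 * Responses larger than the inline area use a separately allocated
 * resp_buf (see virtio_gpu_get_vbuf() below).
 */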
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
                                uint32_t *resid)
{
        int handle;

        idr_preload(GFP_KERNEL);
        spin_lock(&vgdev->resource_idr_lock);
        handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
        spin_unlock(&vgdev->resource_idr_lock);
        idr_preload_end();
        *resid = handle;
}
void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
        spin_lock(&vgdev->resource_idr_lock);
        idr_remove(&vgdev->resource_idr, id);
        spin_unlock(&vgdev->resource_idr_lock);
}
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->ctrlq.dequeue_work);
}
void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->cursorq.dequeue_work);
}
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_vbuffer *vbuf;
        int i, size, count = 0;
        void *ptr;

        INIT_LIST_HEAD(&vgdev->free_vbufs);
        spin_lock_init(&vgdev->free_vbufs_lock);
        count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
        count += virtqueue_get_vring_size(vgdev->cursorq.vq);
        size = count * VBUFFER_SIZE;
        DRM_INFO("virtio vbuffers: %d bufs, %zdB each, %dkB total.\n",
                 count, VBUFFER_SIZE, size / 1024);

        vgdev->vbufs = kzalloc(size, GFP_KERNEL);
        if (!vgdev->vbufs)
                return -ENOMEM;

        for (i = 0, ptr = vgdev->vbufs;
             i < count;
             i++, ptr += VBUFFER_SIZE) {
                vbuf = ptr;
                list_add(&vbuf->list, &vgdev->free_vbufs);
        }
        return 0;
}
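
/*
 * The pool above holds one vbuffer per descriptor of both virtqueues
 * combined, so a request that fits in the ring always has a free vbuffer
 * to describe it; that is presumably why virtio_gpu_get_vbuf() treats an
 * empty free list as a BUG rather than an allocation failure.
 */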
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_vbuffer *vbuf;
        int i, count = 0;

        count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
        count += virtqueue_get_vring_size(vgdev->cursorq.vq);

        spin_lock(&vgdev->free_vbufs_lock);
        for (i = 0; i < count; i++) {
                if (WARN_ON(list_empty(&vgdev->free_vbufs)))
                        return;
                vbuf = list_first_entry(&vgdev->free_vbufs,
                                        struct virtio_gpu_vbuffer, list);
                list_del(&vbuf->list);
        }
        spin_unlock(&vgdev->free_vbufs_lock);
        kfree(vgdev->vbufs);
}
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        spin_lock(&vgdev->free_vbufs_lock);
        BUG_ON(list_empty(&vgdev->free_vbufs));
        vbuf = list_first_entry(&vgdev->free_vbufs,
                                struct virtio_gpu_vbuffer, list);
        list_del(&vbuf->list);
        spin_unlock(&vgdev->free_vbufs_lock);
        memset(vbuf, 0, VBUFFER_SIZE);

        BUG_ON(size > MAX_INLINE_CMD_SIZE);
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}
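
/*
 * Note the two response paths above: small responses live in the inline
 * area right behind the command, while larger ones (display info, capsets)
 * use a caller-provided resp_buf, which free_vbuf() kfree()s once the host
 * has consumed the request.
 */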
static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, size,
                                   sizeof(struct virtio_gpu_ctrl_hdr),
                                   NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return vbuf->buf;
}
static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}
static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}
static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kfree(vbuf->data_buf);
        spin_lock(&vgdev->free_vbufs_lock);
        list_add(&vbuf->list, &vgdev->free_vbufs);
        spin_unlock(&vgdev->free_vbufs_lock);
}
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id = 0;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
                        DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);

                        if (fence_id > f) {
                                DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
                                          __func__, fence_id, f);
                        } else {
                                fence_id = f;
                        }
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);

                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);
}
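
/*
 * Fence ids are emitted in submission order under ctrlq.qlock, so the
 * responses handled above should carry increasing fence_id values; only
 * the highest one seen in the batch is handed to
 * virtio_gpu_fence_event_process(), which is expected to signal every
 * pending fence up to and including that id.
 */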
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        int outcnt = 0, incnt = 0;
        int ret;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        sgs[outcnt + incnt] = &vcmd;
        outcnt++;

        if (vbuf->data_size) {
                sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
                sgs[outcnt + incnt] = &vout;
                outcnt++;
        }

        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
                spin_lock(&vgdev->ctrlq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }

        if (ret > 0)
                ret = 0;
        return ret;
}
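
/*
 * A control request uses at most three descriptors: the command header,
 * an optional guest-to-host data buffer, and an optional response buffer.
 * That is the "3" behind the vq->num_free check in the fenced path below.
 */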
static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_vbuffer *vbuf)
{
        int rc;

        spin_lock(&vgdev->ctrlq.qlock);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf,
                                               struct virtio_gpu_ctrl_hdr *hdr,
                                               struct virtio_gpu_fence **fence)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int rc;

again:
        spin_lock(&vgdev->ctrlq.qlock);

        /*
         * Make sure we have enough space in the virtqueue. If not,
         * wait here until we have.
         *
         * Without that virtio_gpu_queue_ctrl_buffer_locked might have
         * to wait for free space, which can result in fence ids being
         * submitted out-of-order.
         */
        if (vq->num_free < 3) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
                goto again;
        }

        if (fence)
                virtio_gpu_fence_emit(vgdev, hdr, fence);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}
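
/*
 * The command helpers further down all follow the same pattern: grab a
 * vbuffer, fill in the little-endian wire struct, then queue it plain or
 * fenced. As a rough sketch of a fenced submission (mirroring
 * virtio_gpu_cmd_transfer_to_host_2d() below):
 *
 *   cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 *   memset(cmd_p, 0, sizeof(*cmd_p));
 *   cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
 *   ...
 *   virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 */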
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        int ret;
        int outcnt;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }
        spin_unlock(&vgdev->cursorq.qlock);

        if (ret > 0)
                ret = 0;
        return ret;
}
/* just create gem objects for userspace and long lived objects,
   just use dma_alloced pages for the queue objects? */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    uint32_t resource_id,
                                    uint32_t format,
                                    uint32_t width,
                                    uint32_t height)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->format = cpu_to_le32(format);
        cmd_p->width = cpu_to_le32(width);
        cmd_p->height = cpu_to_le32(height);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
                                           uint32_t resource_id)
{
        struct virtio_gpu_resource_detach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint32_t resource_id, uint64_t offset,
                                        __le32 width, __le32 height,
                                        __le32 x, __le32 y,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = width;
        cmd_p->r.height = height;
        cmd_p->r.x = x;
        cmd_p->r.y = y;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
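
/*
 * Ownership of the ents array passes to the vbuffer here: it is queued as
 * the data_buf scatterlist entry and kfree()d by free_vbuf() once the host
 * has consumed the command (see the "gets freed when the ring has consumed
 * it" note in virtio_gpu_object_attach() below).
 */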
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}
static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
        vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
        vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}
static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}
int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size = vgdev->capsets[idx].max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *resp_buf;

        if (idx > vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}
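
/*
 * The cache entry is handed back to the caller before the response arrives;
 * callers are expected to wait on vgdev->resp_wq until cache_ent->is_valid
 * flips to 1 (virtio_gpu_cmd_capset_cb() above sets it and wakes the queue),
 * which is how the driver's capset ioctl path is expected to consume it.
 */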
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t nlen, const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name)-1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name)-1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_resource_create_3d *rc_3d,
                                  struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        *cmd_p = *rc_3d;
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->hdr.flags = 0;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t resource_id, uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t resource_id, uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct virtio_gpu_box *box,
                                          struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object *obj,
                             uint32_t resource_id,
                             struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_mem_entry *ents;
        struct scatterlist *sg;
        int si;

        if (!obj->pages) {
                int ret;

                ret = virtio_gpu_object_get_sg_table(vgdev, obj);
                if (ret)
                        return ret;
        }

        /* gets freed when the ring has consumed it */
        ents = kmalloc_array(obj->pages->nents,
                             sizeof(struct virtio_gpu_mem_entry),
                             GFP_KERNEL);
        if (!ents) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }

        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
                ents[si].addr = cpu_to_le64(sg_phys(sg));
                ents[si].length = cpu_to_le32(sg->length);
                ents[si].padding = 0;
        }

        virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
                                               ents, obj->pages->nents,
                                               fence);
        obj->hw_res_handle = resource_id;
        return 0;
}
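
/*
 * Each scatterlist segment above becomes one virtio_gpu_mem_entry
 * (guest-physical address, length, padding), so the host sees the object's
 * backing store as a flat run of address/length pairs.
 */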
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}