/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)
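
/*
 * A vbuffer is the struct itself followed by MAX_INLINE_CMD_SIZE bytes of
 * inline command space and MAX_INLINE_RESP_SIZE bytes of inline response
 * space; responses larger than the inline size use a separately allocated
 * buffer.
 */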

void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				uint32_t *resid)
{
	int handle;

	idr_preload(GFP_KERNEL);
	spin_lock(&vgdev->resource_idr_lock);
	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&vgdev->resource_idr_lock);
	idr_preload_end();
	*resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	spin_lock(&vgdev->resource_idr_lock);
	idr_remove(&vgdev->resource_idr, id);
	spin_unlock(&vgdev->resource_idr_lock);
}

void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	schedule_work(&vgdev->cursorq.dequeue_work);
}
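
/*
 * Preallocate one vbuffer per descriptor of the control and cursor rings
 * in a single allocation and put them all on the free_vbufs list.
 */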
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_vbuffer *vbuf;
	int i, size, count = 0;
	void *ptr;

	INIT_LIST_HEAD(&vgdev->free_vbufs);
	spin_lock_init(&vgdev->free_vbufs_lock);
	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
	size = count * VBUFFER_SIZE;
	DRM_INFO("virtio vbuffers: %d bufs, %zdB each, %dkB total.\n",
		 count, VBUFFER_SIZE, size / 1024);

	vgdev->vbufs = kzalloc(size, GFP_KERNEL);
	if (!vgdev->vbufs)
		return -ENOMEM;

	for (i = 0, ptr = vgdev->vbufs;
	     i < count;
	     i++, ptr += VBUFFER_SIZE) {
		vbuf = ptr;
		list_add(&vbuf->list, &vgdev->free_vbufs);
	}
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_vbuffer *vbuf;
	int i, count = 0;

	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
	count += virtqueue_get_vring_size(vgdev->cursorq.vq);

	spin_lock(&vgdev->free_vbufs_lock);
	for (i = 0; i < count; i++) {
		if (WARN_ON(list_empty(&vgdev->free_vbufs)))
			return;
		vbuf = list_first_entry(&vgdev->free_vbufs,
					struct virtio_gpu_vbuffer, list);
		list_del(&vbuf->list);
	}
	spin_unlock(&vgdev->free_vbufs_lock);
	kfree(vgdev->vbufs);
}
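
/*
 * Pop a vbuffer off the free list and set up its command and response
 * pointers: the command always lives inline behind the struct, the
 * response either fits inline behind the command or uses the supplied
 * resp_buf.
 */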
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	spin_lock(&vgdev->free_vbufs_lock);
	BUG_ON(list_empty(&vgdev->free_vbufs));
	vbuf = list_first_entry(&vgdev->free_vbufs,
				struct virtio_gpu_vbuffer, list);
	list_del(&vbuf->list);
	spin_unlock(&vgdev->free_vbufs_lock);
	memset(vbuf, 0, VBUFFER_SIZE);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	spin_lock(&vgdev->free_vbufs_lock);
	list_add(&vbuf->list, &vgdev->free_vbufs);
	spin_unlock(&vgdev->free_vbufs_lock);
}
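
/*
 * Move all buffers the host has finished with from the virtqueue onto
 * reclaim_list.
 */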
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}
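
/*
 * Work handler for the control queue: collect completed buffers, log
 * unexpected response types, track the highest fence id seen, run the
 * per-buffer response callbacks and hand the fence id to the fence code.
 */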
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}
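
/*
 * Add a command to the control virtqueue with up to three scatterlist
 * entries: command, optional data buffer, optional response buffer.
 * Called with ctrlq.qlock held; the lock is dropped while waiting for
 * the host to free up descriptors.
 */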
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	if (!ret)
		ret = vq->num_free;
	return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	int rc;

	spin_lock(&vgdev->ctrlq.qlock);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}
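
/*
 * Like virtio_gpu_queue_ctrl_buffer(), but emits the fence under the same
 * lock hold that queues the buffer, so fence ids reach the ring in order.
 */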
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
					       struct virtio_gpu_fence **fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int rc;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 3) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (fence)
		virtio_gpu_fence_emit(vgdev, hdr, fence);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}
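
/* Cursor updates go through their own virtqueue with a single out entry. */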
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

/* just create gem objects for userspace and long lived objects,
   just use dma_alloced pages for the queue objects? */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    uint32_t resource_id,
				    uint32_t format,
				    uint32_t width,
				    uint32_t height)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(width);
	cmd_p->height = cpu_to_le32(height);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
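
/*
 * The virtio_gpu_cmd_* helpers below all follow the same pattern: grab a
 * vbuffer, fill in the little-endian command struct and queue it on the
 * control ring, attaching a fence where the caller passed one in.
 */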
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
					   uint32_t resource_id)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
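
/*
 * Attach a page list to a resource.  The ents array is handed to the ring
 * as a separate data buffer and is freed by free_vbuf() once the host has
 * consumed the command.
 */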
void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory.");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}
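
/*
 * Ask the host for the current display configuration.  The reply is parsed
 * by virtio_gpu_cmd_get_display_info_cb(), which also clears
 * display_info_pending and wakes up resp_wq.
 */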
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *resp_buf;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name)-1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name)-1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_resource_create_3d *rc_3d,
				  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	*cmd_p = *rc_3d;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->hdr.flags = 0;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
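
/*
 * Build a mem_entry array from the object's scatter/gather table and attach
 * it as backing store for the given resource id.
 */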
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     uint32_t resource_id,
			     struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(obj->pages->nents,
			     sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
		ents[si].addr = cpu_to_le64(sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
					       ents, obj->pages->nents,
					       fence);
	obj->hw_res_handle = resource_id;
	return 0;
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}