/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)

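/*
 * Each vbuffer is a single slab object: the struct virtio_gpu_vbuffer
 * itself, followed by up to MAX_INLINE_CMD_SIZE bytes of command space
 * and MAX_INLINE_RESP_SIZE bytes of response space (VBUFFER_SIZE total).
 * Responses larger than the inline space use a separately allocated
 * buffer supplied by the caller.
 */
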
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
                                uint32_t *resid)
{
        int handle;

        idr_preload(GFP_KERNEL);
        spin_lock(&vgdev->resource_idr_lock);
        handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
        spin_unlock(&vgdev->resource_idr_lock);
        idr_preload_end();
        *resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
        spin_lock(&vgdev->resource_idr_lock);
        idr_remove(&vgdev->resource_idr, id);
        spin_unlock(&vgdev->resource_idr_lock);
}

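/*
 * Virtqueue callbacks: these run from the virtio interrupt path, so they
 * only schedule the matching dequeue work instead of draining the queue
 * directly.
 */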
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                         VBUFFER_SIZE,
                                         __alignof__(struct virtio_gpu_vbuffer),
                                         0, NULL);
        if (!vgdev->vbufs)
                return -ENOMEM;
        return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        kmem_cache_destroy(vgdev->vbufs);
        vgdev->vbufs = NULL;
}

static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
        if (!vbuf)
                return ERR_PTR(-ENOMEM);
        memset(vbuf, 0, VBUFFER_SIZE);

        BUG_ON(size > MAX_INLINE_CMD_SIZE);
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}

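/*
 * Helpers that wrap virtio_gpu_get_vbuf() for the common cases: a plain
 * command with an inline ctrl_hdr sized response, a cursor update, and a
 * command with a caller supplied response buffer and completion callback.
 */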
static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, size,
                                   sizeof(struct virtio_gpu_ctrl_hdr),
                                   NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kfree(vbuf->data_buf);
        kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}

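/*
 * Work handler for the control queue: collect completed buffers, log
 * non-OK responses, track the highest fence id signalled by the host,
 * run per-buffer response callbacks, then free the vbuffers and wake up
 * anyone waiting for ring space.
 */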
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id = 0;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
                        DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);

                        if (fence_id > f) {
                                DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
                                          __func__, fence_id, f);
                        } else {
                                fence_id = f;
                        }
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);

                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}

static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
                __releases(&vgdev->ctrlq.qlock)
                __acquires(&vgdev->ctrlq.qlock)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        int outcnt = 0, incnt = 0;
        int ret;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        sgs[outcnt + incnt] = &vcmd;
        outcnt++;

        if (vbuf->data_size) {
                sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
                sgs[outcnt + incnt] = &vout;
                outcnt++;
        }

        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
                spin_lock(&vgdev->ctrlq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }

        if (!ret)
                ret = vq->num_free;
        return ret;
}

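/* Unfenced commands just take the queue lock around the locked helper. */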
static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_vbuffer *vbuf)
{
        int rc;

        spin_lock(&vgdev->ctrlq.qlock);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}

static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf,
                                               struct virtio_gpu_ctrl_hdr *hdr,
                                               struct virtio_gpu_fence **fence)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int rc;

again:
        spin_lock(&vgdev->ctrlq.qlock);

        /*
         * Make sure we have enough space in the virtqueue.  If not
         * wait here until we have.
         *
         * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
         * to wait for free space, which can result in fence ids being
         * submitted out-of-order.
         */
        if (vq->num_free < 3) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
                goto again;
        }

        if (fence)
                virtio_gpu_fence_emit(vgdev, hdr, fence);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}

static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        int ret;
        int outcnt;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }
        spin_unlock(&vgdev->cursorq.qlock);

        if (!ret)
                ret = vq->num_free;
        return ret;
}

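/*
 * Command helpers: each builds its command structure in a freshly
 * allocated vbuffer and queues it on the control virtqueue.  The
 * *_fenced variants additionally emit a fence into the command header.
 */
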
/* just create gem objects for userspace and long lived objects,
   just use dma_alloced pages for the queue objects? */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    uint32_t resource_id,
                                    uint32_t format,
                                    uint32_t width,
                                    uint32_t height)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->format = cpu_to_le32(format);
        cmd_p->width = cpu_to_le32(width);
        cmd_p->height = cpu_to_le32(height);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
                                           uint32_t resource_id)
{
        struct virtio_gpu_resource_detach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint32_t resource_id, uint64_t offset,
                                        __le32 width, __le32 height,
                                        __le32 x, __le32 y,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = width;
        cmd_p->r.height = height;
        cmd_p->r.x = x;
        cmd_p->r.y = y;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

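/*
 * Backing pages are described by an array of struct virtio_gpu_mem_entry
 * handed over via vbuf->data_buf, which goes out as an extra OUT
 * scatterlist entry; free_vbuf() releases it once the host has consumed
 * the buffer.
 */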
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
        vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
        vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

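/*
 * The query commands below are asynchronous: they allocate a response
 * buffer, queue the command and return immediately.  The _cb handlers
 * above store the results in the device structure and wake resp_wq when
 * the host answers.
 */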
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size = vgdev->capsets[idx].max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *resp_buf;

        if (idx > vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t nlen, const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name)-1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name)-1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_resource_create_3d *rc_3d,
                                  struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        *cmd_p = *rc_3d;
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->hdr.flags = 0;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t resource_id, uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t resource_id, uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct virtio_gpu_box *box,
                                          struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

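/*
 * Build a virtio_gpu_mem_entry array from the object's sg table and
 * attach it as backing storage for the given resource id.
 */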
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object *obj,
                             uint32_t resource_id,
                             struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_mem_entry *ents;
        struct scatterlist *sg;
        int si;

        if (!obj->pages) {
                int ret;
                ret = virtio_gpu_object_get_sg_table(vgdev, obj);
                if (ret)
                        return ret;
        }

        /* gets freed when the ring has consumed it */
        ents = kmalloc_array(obj->pages->nents,
                             sizeof(struct virtio_gpu_mem_entry),
                             GFP_KERNEL);
        if (!ents) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }

        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
                ents[si].addr = cpu_to_le64(sg_phys(sg));
                ents[si].length = cpu_to_le32(sg->length);
                ents[si].padding = 0;
        }

        virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
                                               ents, obj->pages->nents,
                                               fence);
        obj->hw_res_handle = resource_id;
        return 0;
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}