/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)
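
/*
 * A vbuffer is allocated as a single slab object: the struct itself,
 * followed by up to MAX_INLINE_CMD_SIZE bytes of command storage and up
 * to MAX_INLINE_RESP_SIZE bytes of response storage.  Larger responses
 * use a caller-provided buffer instead (see virtio_gpu_get_vbuf()).
 */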
static void convert_to_hw_box(struct virtio_gpu_box *dst,
                              const struct drm_virtgpu_3d_box *src)
{
        dst->x = cpu_to_le32(src->x);
        dst->y = cpu_to_le32(src->y);
        dst->z = cpu_to_le32(src->z);
        dst->w = cpu_to_le32(src->w);
        dst->h = cpu_to_le32(src->h);
        dst->d = cpu_to_le32(src->d);
}
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->ctrlq.dequeue_work);
}
void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->cursorq.dequeue_work);
}
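
/*
 * The two virtqueue callbacks above only acknowledge the interrupt and
 * schedule the per-queue work item; the actual draining of completed
 * buffers happens in virtio_gpu_dequeue_ctrl_func() and
 * virtio_gpu_dequeue_cursor_func() below.
 */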
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                         VBUFFER_SIZE,
                                         __alignof__(struct virtio_gpu_vbuffer),
                                         0, NULL);
        if (!vgdev->vbufs)
                return -ENOMEM;
        return 0;
}
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        kmem_cache_destroy(vgdev->vbufs);
}
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
        if (!vbuf)
                return ERR_PTR(-ENOMEM);

        BUG_ON(size > MAX_INLINE_CMD_SIZE ||
               size < sizeof(struct virtio_gpu_ctrl_hdr));
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}
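
/*
 * Note on response placement: when the response fits in the inline area
 * it lives right behind the command inside the same vbuffer; otherwise
 * the caller must pass a separately allocated resp_buf, which
 * free_vbuf() will kfree() on completion.
 */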
static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
        /* this assumes a vbuf contains a command that starts with a
         * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
         * virtqueues.
         */
        return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}
static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}
static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}
static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
                                         sizeof(struct virtio_gpu_ctrl_hdr),
                                         NULL);
}
static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer **vbuffer_p,
                                     int size,
                                     virtio_gpu_resp_cb cb)
{
        return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
                                         sizeof(struct virtio_gpu_ctrl_hdr),
                                         NULL);
}
static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kvfree(vbuf->data_buf);
        kmem_cache_free(vgdev->vbufs, vbuf);
}
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id = 0;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry(entry, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

                trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
                        if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
                                struct virtio_gpu_ctrl_hdr *cmd;
                                cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
                                DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
                                                      le32_to_cpu(resp->type),
                                                      le32_to_cpu(cmd->type));
                        } else
                                DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                }
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);

                        if (fence_id > f) {
                                DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
                                          __func__, fence_id, f);
                        } else {
                                fence_id = f;
                        }
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                if (entry->objs)
                        virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
}
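
/*
 * Rough flow of the control-queue dequeue work above: drain the
 * virtqueue under ctrlq.qlock, then, outside the lock, log error
 * responses, track the highest fence id seen in fenced responses, run
 * per-buffer response callbacks, wake waiters, signal fences and
 * finally release the buffers (object arrays are put via the delayed
 * path so this work item never blocks on object cleanup).
 */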
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}
/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
        int ret, s, i;
        struct sg_table *sgt;
        struct scatterlist *sg;
        struct page *pg;

        if (WARN_ON(!PAGE_ALIGNED(data)))
                return NULL;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
        ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
        if (ret) {
                kfree(sgt);
                return NULL;
        }

        for_each_sgtable_sg(sgt, sg, i) {
                pg = vmalloc_to_page(data);
                if (!pg) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        return NULL;
                }

                s = min_t(int, PAGE_SIZE, size);
                sg_set_page(sg, pg, s, 0);

                size -= s;
                data += s;
        }

        return sgt;
}
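
/*
 * vmalloc'ed data buffers (such as large command submissions) are not
 * physically contiguous, so they are described to the host with a
 * scatter-gather table built page by page above rather than with a
 * single sg entry.
 */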
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf,
                                     struct virtio_gpu_fence *fence,
                                     int elemcnt,
                                     struct scatterlist **sgs,
                                     int outcnt,
                                     int incnt)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int ret, idx;

        if (!drm_dev_enter(vgdev->ddev, &idx)) {
                if (fence && vbuf->objs)
                        virtio_gpu_array_unlock_resv(vbuf->objs);
                free_vbuf(vgdev, vbuf);
                return -1;
        }

        if (vgdev->has_indirect)
                elemcnt = 1;

again:
        spin_lock(&vgdev->ctrlq.qlock);

        if (vq->num_free < elemcnt) {
                spin_unlock(&vgdev->ctrlq.qlock);
                virtio_gpu_notify(vgdev);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
                goto again;
        }

        /* now that the position of the vbuf in the virtqueue is known, we can
         * finally set the fence id
         */
        if (fence) {
                virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
                                      fence);
                if (vbuf->objs) {
                        virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
                        virtio_gpu_array_unlock_resv(vbuf->objs);
                }
        }

        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        WARN_ON(ret);

        trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

        atomic_inc(&vgdev->pending_commands);

        spin_unlock(&vgdev->ctrlq.qlock);

        drm_dev_exit(idx);
        return 0;
}
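
/*
 * virtio_gpu_queue_ctrl_sgs() is the single place where control
 * commands enter the ring: it waits (outside the lock) until enough
 * descriptors are free, emits the fence while holding ctrlq.qlock so
 * fence ids match ring order, adds the sg list and only bumps
 * pending_commands -- the host is not notified until
 * virtio_gpu_notify() is called.
 */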
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf,
                                               struct virtio_gpu_fence *fence)
{
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        struct sg_table *sgt = NULL;
        int elemcnt = 0, outcnt = 0, incnt = 0, ret;

        /* set up vcmd */
        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        elemcnt++;
        sgs[outcnt] = &vcmd;
        outcnt++;

        /* set up vout */
        if (vbuf->data_size) {
                if (is_vmalloc_addr(vbuf->data_buf)) {
                        int sg_ents;

                        sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
                                             &sg_ents);
                        if (!sgt) {
                                if (fence && vbuf->objs)
                                        virtio_gpu_array_unlock_resv(vbuf->objs);
                                return -1;
                        }

                        elemcnt += sg_ents;
                        sgs[outcnt] = sgt->sgl;
                } else {
                        sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
                        elemcnt++;
                        sgs[outcnt] = &vout;
                }
                outcnt++;
        }

        /* set up vresp */
        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                elemcnt++;
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

        ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
                                        incnt);

        if (sgt) {
                sg_free_table(sgt);
                kfree(sgt);
        }

        return ret;
}
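
/*
 * Request layout built above: one "out" sg for the command, an optional
 * "out" sg (or sg table for vmalloc memory) for additional data, and an
 * optional "in" sg for the response the host writes back.
 */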
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
        bool notify;

        if (!atomic_read(&vgdev->pending_commands))
                return;

        spin_lock(&vgdev->ctrlq.qlock);
        atomic_set(&vgdev->pending_commands, 0);
        notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
        spin_unlock(&vgdev->ctrlq.qlock);

        if (notify)
                virtqueue_notify(vgdev->ctrlq.vq);
}
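
/*
 * Commands are queued and notified separately so that several commands
 * can be batched with a single kick.  A typical caller does roughly
 * (sketch only, not code from this file):
 *
 *      virtio_gpu_cmd_transfer_to_host_2d(vgdev, ...);
 *      virtio_gpu_cmd_resource_flush(vgdev, ...);
 *      virtio_gpu_notify(vgdev);
 */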
static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_vbuffer *vbuf)
{
        return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        int idx, ret, outcnt;
        bool notify;

        if (!drm_dev_enter(vgdev->ddev, &idx)) {
                free_vbuf(vgdev, vbuf);
                return;
        }

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                trace_virtio_gpu_cmd_queue(vq,
                                           virtio_gpu_vbuf_ctrl_hdr(vbuf));

                notify = virtqueue_kick_prepare(vq);
        }

        spin_unlock(&vgdev->cursorq.qlock);

        if (notify)
                virtqueue_notify(vq);

        drm_dev_exit(idx);
}
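
/*
 * Cursor updates use their own, smaller virtqueue and are never fenced;
 * on -ENOSPC the submitter simply waits for ring space and retries, and
 * the kick happens outside cursorq.qlock.
 */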
/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */
/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo,
                                    struct virtio_gpu_object_params *params,
                                    struct virtio_gpu_object_array *objs,
                                    struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(params->format);
        cmd_p->width = cpu_to_le32(params->width);
        cmd_p->height = cpu_to_le32(params->height);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
        bo->created = true;
}
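
/*
 * For illustration, a dumb/2D framebuffer is typically brought up with
 * the commands in this file roughly as follows (sketch, assuming the
 * object and mem entries were prepared elsewhere):
 *
 *      virtio_gpu_cmd_create_resource(vgdev, bo, params, objs, fence);
 *      virtio_gpu_object_attach(vgdev, bo, ents, nents);
 *      virtio_gpu_cmd_set_scanout(vgdev, scanout, bo->hw_res_handle, w, h, 0, 0);
 *      virtio_gpu_cmd_transfer_to_host_2d(vgdev, 0, w, h, 0, 0, objs, NULL);
 *      virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, 0, 0, w, h);
 *      virtio_gpu_notify(vgdev);
 */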
static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_object *bo;

        bo = vbuf->resp_cb_data;
        vbuf->resp_cb_data = NULL;

        virtio_gpu_cleanup_object(bo);
}
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_object *bo)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int ret;

        cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
                                        virtio_gpu_cmd_unref_cb);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

        vbuf->resp_cb_data = bo;
        ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        if (ret < 0)
                virtio_gpu_cleanup_object(bo);
}
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint64_t offset,
                                        uint32_t width, uint32_t height,
                                        uint32_t x, uint32_t y,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

        if (use_dma_api)
                dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
                                            shmem->pages, DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}
static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        if (vgdev->capsets) {
                vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
                vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
                vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        } else {
                DRM_ERROR("invalid capset memory.");
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}
static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        /* Copy must occur before is_valid is signalled. */
                        smp_wmb();
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up_all(&vgdev->resp_wq);
}
static int virtio_get_edid_block(void *data, u8 *buf,
                                 unsigned int block, size_t len)
{
        struct virtio_gpu_resp_edid *resp = data;
        size_t start = block * EDID_LENGTH;

        if (start + len > le32_to_cpu(resp->size))
                return -EINVAL;
        memcpy(buf, resp->edid + start, len);
        return 0;
}
static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
                                       struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_cmd_get_edid *cmd =
                (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
        struct virtio_gpu_resp_edid *resp =
                (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
        uint32_t scanout = le32_to_cpu(cmd->scanout);
        struct virtio_gpu_output *output;
        struct edid *new_edid, *old_edid;

        if (scanout >= vgdev->num_scanouts)
                return;
        output = vgdev->outputs + scanout;

        new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
        drm_connector_update_edid_property(&output->conn, new_edid);

        spin_lock(&vgdev->display_info_lock);
        old_edid = output->edid;
        output->edid = new_edid;
        spin_unlock(&vgdev->display_info_lock);

        kfree(old_edid);
        wake_up(&vgdev->resp_wq);
}
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}
int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        struct virtio_gpu_drv_cap_cache *search_ent;
        void *resp_buf;

        *cache_p = NULL;

        if (idx >= vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        max_size = vgdev->capsets[idx].max_size;
        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        /* Search while under lock in case it was added by another task. */
        list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
                if (search_ent->id == vgdev->capsets[idx].id &&
                    search_ent->version == version) {
                        *cache_p = search_ent;
                        break;
                }
        }
        if (!*cache_p)
                list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        if (*cache_p) {
                /* Entry was found, so free everything that was just created. */
                kfree(resp_buf);
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return 0;
        }

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}
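
/*
 * Capset reads are cached: virtio_gpu_cmd_get_capset() allocates a
 * cache entry up front, checks under display_info_lock whether another
 * task already added the same id/version, and only then issues the
 * command; waiters poll is_valid (set by virtio_gpu_cmd_capset_cb())
 * via resp_wq.
 */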
int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_cmd_get_edid *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;
        int scanout;

        if (WARN_ON(!vgdev->has_edid))
                return -EINVAL;

        for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
                resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
                                   GFP_KERNEL);
                if (!resp_buf)
                        return -ENOMEM;

                cmd_p = virtio_gpu_alloc_cmd_resp
                        (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
                         sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
                         resp_buf);
                cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
                cmd_p->scanout = cpu_to_le32(scanout);
                virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        }

        return 0;
}
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t nlen, const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_object *bo,
                                  struct virtio_gpu_object_params *params,
                                  struct virtio_gpu_object_array *objs,
                                  struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(params->format);
        cmd_p->width = cpu_to_le32(params->width);
        cmd_p->height = cpu_to_le32(params->height);

        cmd_p->target = cpu_to_le32(params->target);
        cmd_p->bind = cpu_to_le32(params->bind);
        cmd_p->depth = cpu_to_le32(params->depth);
        cmd_p->array_size = cpu_to_le32(params->array_size);
        cmd_p->last_level = cpu_to_le32(params->last_level);
        cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
        cmd_p->flags = cpu_to_le32(params->flags);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

        bo->created = true;
}
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        uint32_t stride,
                                        uint32_t layer_stride,
                                        struct drm_virtgpu_3d_box *box,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

        if (virtio_gpu_is_shmem(bo) && use_dma_api) {
                struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
                dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
                                            shmem->pages, DMA_TO_DEVICE);
        }

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);
        cmd_p->stride = cpu_to_le32(stride);
        cmd_p->layer_stride = cpu_to_le32(layer_stride);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          uint32_t stride,
                                          uint32_t layer_stride,
                                          struct drm_virtgpu_3d_box *box,
                                          struct virtio_gpu_object_array *objs,
                                          struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);
        cmd_p->stride = cpu_to_le32(stride);
        cmd_p->layer_stride = cpu_to_le32(layer_stride);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id,
                           struct virtio_gpu_object_array *objs,
                           struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *obj,
                              struct virtio_gpu_mem_entry *ents,
                              unsigned int nents)
{
        virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
                                               ents, nents, NULL);
}
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}
static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
                                            struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_object *obj =
                gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
        struct virtio_gpu_resp_resource_uuid *resp =
                (struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
        uint32_t resp_type = le32_to_cpu(resp->hdr.type);

        spin_lock(&vgdev->resource_export_lock);
        WARN_ON(obj->uuid_state != STATE_INITIALIZING);

        if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
            obj->uuid_state == STATE_INITIALIZING) {
                import_uuid(&obj->uuid, resp->uuid);
                obj->uuid_state = STATE_OK;
        } else {
                obj->uuid_state = STATE_ERR;
        }
        spin_unlock(&vgdev->resource_export_lock);

        wake_up_all(&vgdev->resp_wq);
}
int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_resource_assign_uuid *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_resp_resource_uuid *resp_buf;

        resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
        if (!resp_buf) {
                spin_lock(&vgdev->resource_export_lock);
                bo->uuid_state = STATE_ERR;
                spin_unlock(&vgdev->resource_export_lock);
                virtio_gpu_array_put_free(objs);
                return -ENOMEM;
        }

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

        vbuf->objs = objs;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}
static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
                                           struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_object *bo =
                gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
        struct virtio_gpu_resp_map_info *resp =
                (struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
        struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
        uint32_t resp_type = le32_to_cpu(resp->hdr.type);

        spin_lock(&vgdev->host_visible_lock);

        if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
                vram->map_info = resp->map_info;
                vram->map_state = STATE_OK;
        } else {
                vram->map_state = STATE_ERR;
        }

        spin_unlock(&vgdev->host_visible_lock);
        wake_up_all(&vgdev->resp_wq);
}
int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
                       struct virtio_gpu_object_array *objs, uint64_t offset)
{
        struct virtio_gpu_resource_map_blob *cmd_p;
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_resp_map_info *resp_buf;

        resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->offset = cpu_to_le64(offset);

        vbuf->objs = objs;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}
void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
                          struct virtio_gpu_object *bo)
{
        struct virtio_gpu_resource_unmap_blob *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo,
                                    struct virtio_gpu_object_params *params,
                                    struct virtio_gpu_mem_entry *ents,
                                    uint32_t nents)
{
        struct virtio_gpu_resource_create_blob *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
        cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
        cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
        cmd_p->blob_id = cpu_to_le64(params->blob_id);
        cmd_p->size = cpu_to_le64(params->size);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        bo->created = true;
}
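
/*
 * Blob resources carry their backing store description (mem entries) in
 * the same request that creates them; unlike the classic create paths
 * the blob_id/size fields are 64-bit and the command is tagged with the
 * creating context.
 */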
void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
                                     uint32_t scanout_id,
                                     struct virtio_gpu_object *bo,
                                     struct drm_framebuffer *fb,
                                     uint32_t width, uint32_t height,
                                     uint32_t x, uint32_t y)
{
        u32 i;
        struct virtio_gpu_set_scanout_blob *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        uint32_t format = virtio_gpu_translate_format(fb->format->format);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);

        cmd_p->format = cpu_to_le32(format);
        cmd_p->width = cpu_to_le32(fb->width);
        cmd_p->height = cpu_to_le32(fb->height);

        for (i = 0; i < 4; i++) {
                cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
                cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
        }

        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}