/*
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"
#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
				    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
				    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
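
/*
 * Create the host rendering context for this DRM file on first use.  The
 * per-file context id in vfpriv->ctx_id is allocated elsewhere in the
 * driver; here we only issue the CTX_CREATE command, tagged with the
 * current task's comm name for debugging.  context_lock prevents two
 * racing callers from creating the context twice.
 */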
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	char dbgname[TASK_COMM_LEN];

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created)
		goto out_unlock;

	get_task_comm(dbgname, current);
	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
				      strlen(dbgname), dbgname);
	vfpriv->context_created = true;

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
}
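
/*
 * DRM_IOCTL_VIRTGPU_MAP: look up the mmap offset for a GEM handle so that
 * userspace can mmap() the buffer through the DRM file.
 */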
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}
/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
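/*
 * DRM_IOCTL_VIRTGPU_EXECBUFFER: submit a command buffer to the host.
 * Flow: optionally wait on an incoming fence fd, optionally reserve an
 * outgoing fence fd, pin the referenced BO handles into an object array,
 * copy the command stream from userspace, then queue it on the virtqueue
 * with an out-fence attached.
 */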
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
		return -EINVAL;

	exbuf->fence_fd = -1;

	virtio_gpu_create_context(dev, file);
	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);
		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unused_fd;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_memdup;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_unresv;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_unresv;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	dma_fence_put(&out_fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_memdup:
	kvfree(buf);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}
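
/*
 * DRM_IOCTL_VIRTGPU_GETPARAM: report device capabilities (3D support,
 * blob resources, host-visible memory, cross-device sharing) as an int
 * copied back to the userspace-supplied address.
 */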
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	case VIRTGPU_PARAM_RESOURCE_BLOB:
		value = vgdev->has_resource_blob ? 1 : 0;
		break;
	case VIRTGPU_PARAM_HOST_VISIBLE:
		value = vgdev->has_host_visible ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CROSS_DEVICE:
		value = vgdev->has_resource_assign_uuid ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}
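
/*
 * DRM_IOCTL_VIRTGPU_RESOURCE_CREATE: create a (non-blob) host resource and
 * a backing GEM object, returning both the host resource handle and the
 * GEM handle.  With virgl 3D the full texture parameters are passed
 * through; without it only simple 2D resources are accepted.
 */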
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d) {
		virtio_gpu_create_context(dev, file);
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	} else {
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	/* allocate a single page size object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}
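
/*
 * DRM_IOCTL_VIRTGPU_RESOURCE_INFO: look up a GEM handle and report its
 * size, its host resource handle and, for blob resources, the blob memory
 * type.
 */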
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	if (qobj->host3d_blob || qobj->guest_blob)
		ri->blob_mem = qobj->blob_mem;

	drm_gem_object_put(gobj);
	return 0;
}
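
/*
 * DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST: copy data from the host-side
 * resource into the guest-visible backing store.  Only available with
 * virgl 3D; the transfer is fenced so its completion can be waited on.
 */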
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
		ret = -EINVAL;
		goto err_put_free;
	}

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
		 args->layer_stride, &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}
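
/*
 * DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST: flush guest data to the host-side
 * resource.  Without virgl 3D this is a plain 2D box transfer; with 3D a
 * fenced per-level transfer with explicit strides is issued instead.
 */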
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);

		if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
			ret = -EINVAL;
			goto err_put_free;
		}

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
			 args->stride, args->layer_stride, &args->box, objs,
			 fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}
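
/*
 * DRM_IOCTL_VIRTGPU_WAIT: wait (or poll, with VIRTGPU_WAIT_NOWAIT) for all
 * fences attached to a GEM object's reservation object, with a 15 second
 * timeout.
 */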
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled_rcu(obj->resv, true);
	} else {
		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
						timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);
	return ret;
}
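
/*
 * DRM_IOCTL_VIRTGPU_GET_CAPS: copy a capability set to userspace.  The
 * result is served from the capset cache when possible; otherwise the host
 * is queried and the caller sleeps until the response arrives or a 5
 * second timeout expires.
 */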
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* is_valid check must precede the copy of the cache entry. */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}
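
/*
 * Validate a RESOURCE_CREATE_BLOB request and translate it into object
 * creation parameters.  Also classifies the blob as guest-backed and/or
 * host3d so the ioctl below can pick the right allocation path.
 */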
static int verify_blob(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_fpriv *vfpriv,
		       struct virtio_gpu_object_params *params,
		       struct drm_virtgpu_resource_create_blob *rc_blob,
		       bool *guest_blob, bool *host3d_blob)
{
	if (!vgdev->has_resource_blob)
		return -EINVAL;

	if ((rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK) ||
	    !rc_blob->blob_flags)
		return -EINVAL;

	if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		if (!vgdev->has_resource_assign_uuid)
			return -EINVAL;
	}

	switch (rc_blob->blob_mem) {
	case VIRTGPU_BLOB_MEM_GUEST:
		*guest_blob = true;
		break;
	case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
		*guest_blob = true;
		fallthrough;
	case VIRTGPU_BLOB_MEM_HOST3D:
		*host3d_blob = true;
		break;
	default:
		return -EINVAL;
	}

	if (*host3d_blob) {
		if (!vgdev->has_virgl_3d)
			return -EINVAL;

		/* Must be dword aligned. */
		if (rc_blob->cmd_size % 4 != 0)
			return -EINVAL;

		params->ctx_id = vfpriv->ctx_id;
		params->blob_id = rc_blob->blob_id;
	} else {
		if (rc_blob->blob_id != 0)
			return -EINVAL;

		if (rc_blob->cmd_size != 0)
			return -EINVAL;
	}

	params->blob_mem = rc_blob->blob_mem;
	params->size = rc_blob->size;
	params->blob_flags = rc_blob->blob_flags;
	return 0;
}
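
/*
 * DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB: create a blob resource.  An
 * optional command buffer supplied with the request is submitted to the
 * host first, then either a guest-backed object or a host VRAM-backed
 * object is created, depending on how verify_blob() classified the blob.
 */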
static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
						 void *data,
						 struct drm_file *file)
{
	int ret = 0;
	uint32_t handle = 0;
	bool guest_blob = false;
	bool host3d_blob = false;
	struct drm_gem_object *obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_resource_create_blob *rc_blob = data;

	if (verify_blob(vgdev, vfpriv, &params, rc_blob,
			&guest_blob, &host3d_blob))
		return -EINVAL;

	if (vgdev->has_virgl_3d)
		virtio_gpu_create_context(dev, file);

	if (rc_blob->cmd_size) {
		void *buf;

		buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
				  rc_blob->cmd_size);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
				      vfpriv->ctx_id, NULL, NULL);
	}

	if (guest_blob)
		ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
	else if (!guest_blob && host3d_blob)
		ret = virtio_gpu_vram_create(vgdev, &params, &bo);
	else
		return -EINVAL;

	if (ret < 0)
		return ret;

	bo->guest_blob = guest_blob;
	bo->host3d_blob = host3d_blob;
	bo->blob_mem = rc_blob->blob_mem;
	bo->blob_flags = rc_blob->blob_flags;

	obj = &bo->base.base;
	if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
		if (ret) {
			drm_gem_object_release(obj);
			return ret;
		}
	}

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc_blob->res_handle = bo->hw_res_handle;
	rc_blob->bo_handle = handle;

	return 0;
}
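
/* Ioctl table wiring the DRM_VIRTGPU_* ioctl numbers to the handlers above. */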
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
			  virtio_gpu_resource_create_blob_ioctl,
			  DRM_RENDER_ALLOW),
};