/*
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/file.h>
#include <linux/sync_file.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}
/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
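/*
 * Illustrative only: a minimal userspace submission sketch for this ioctl,
 * kept out of the build with #if 0.  It assumes an already-open virtio-gpu
 * render-node fd and libdrm's drmIoctl(); the helper name submit_commands()
 * is hypothetical and not part of the kernel or libdrm API.
 */
#if 0
#include <stdint.h>
#include <xf86drm.h>
#include <drm/virtgpu_drm.h>

static int submit_commands(int drm_fd, void *cmds, uint32_t cmd_size,
			   uint32_t *bo_handles, uint32_t num_bos)
{
	struct drm_virtgpu_execbuffer exbuf = {
		.flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,	/* request an out-fence fd */
		.size = cmd_size,			/* bytes of command stream, no header */
		.command = (uintptr_t)cmds,
		.bo_handles = (uintptr_t)bo_handles,
		.num_bo_handles = num_bos,
		.fence_fd = -1,
	};

	/* On success the kernel stores a sync_file fd in exbuf.fence_fd. */
	if (drmIoctl(drm_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exbuf))
		return -1;
	return exbuf.fence_fd;
}
#endif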
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *drm_file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
		return -EINVAL;

	exbuf->fence_fd = -1;

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);
		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_unused_fd;
	}

	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unresv;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_memdup;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_memdup;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	return 0;

out_memdup:
	kvfree(buf);
out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d == true ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d == false) {
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	if (vgdev->has_virgl_3d) {
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	}
	/* allocate a single page size object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put_unlocked(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_put_unlocked(gobj);
	return 0;
}
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level,
		 &args->box, objs, fence);
	dma_fence_put(&fence->f);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &args->box, objs, fence);
		dma_fence_put(&fence->f);
	}
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled_rcu(obj->resv, true);
	} else {
		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
						timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put_unlocked(obj);
	return ret;
}
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* is_valid check must precede the copy of the cache entry. */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),
};