/*
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "virtgpu_drv.h"
#include <drm/virtgpu_drm.h>
#include "ttm/ttm_execbuf_util.h"

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
					   struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}

static void virtio_gpu_unref_list(struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);

		drm_gem_object_unreference_unlocked(&qobj->gem_base);
	}
}

static int virtio_gpu_execbuffer(struct drm_device *dev,
				 struct drm_virtgpu_execbuffer *exbuf,
				 struct drm_file *drm_file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct drm_gem_object *gobj;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_object *qobj;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct list_head validate_list;
	struct ttm_validate_buffer *buflist = NULL;
	int i;
	struct ww_acquire_ctx ticket;
	void *buf;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	INIT_LIST_HEAD(&validate_list);
	if (exbuf->num_bo_handles) {
		bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
					   sizeof(uint32_t));
		buflist = drm_calloc_large(exbuf->num_bo_handles,
					   sizeof(struct ttm_validate_buffer));
		if (!bo_handles || !buflist) {
			drm_free_large(bo_handles);
			drm_free_large(buflist);
			return -ENOMEM;
		}

		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			drm_free_large(bo_handles);
			drm_free_large(buflist);
			return -EFAULT;
		}

		for (i = 0; i < exbuf->num_bo_handles; i++) {
			gobj = drm_gem_object_lookup(dev,
						     drm_file, bo_handles[i]);
			if (!gobj) {
				drm_free_large(bo_handles);
				drm_free_large(buflist);
				return -ENOENT;
			}

			qobj = gem_to_virtio_gpu_obj(gobj);
			buflist[i].bo = &qobj->tbo;

			list_add(&buflist[i].head, &validate_list);
		}
		drm_free_large(bo_handles);
	}

	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
	if (ret)
		goto out_free;

	buf = kmalloc(exbuf->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_unresv;
	}
	if (copy_from_user(buf, (void __user *)(uintptr_t)exbuf->command,
			   exbuf->size)) {
		kfree(buf);
		ret = -EFAULT;
		goto out_unresv;
	}
	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, &fence);

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);

	/* fence the command bo */
	virtio_gpu_unref_list(&validate_list);
	drm_free_large(buflist);
	fence_put(&fence->f);
	return 0;

out_unresv:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
	virtio_gpu_unref_list(&validate_list);
	drm_free_large(buflist);
	return ret;
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes).
 *
 * An illustrative userspace sketch follows virtio_gpu_execbuffer_ioctl()
 * below.
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv)
{
	struct drm_virtgpu_execbuffer *execbuffer = data;

	return virtio_gpu_execbuffer(dev, execbuffer, file_priv);
}

static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d == true ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user((void __user *)(unsigned long)param->value,
			 &value, sizeof(int))) {
		return -EFAULT;
	}
	return 0;
}

static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	int ret;
	uint32_t res_id;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	uint32_t size;
	struct list_head validate_list;
	struct ttm_validate_buffer mainbuf;
	struct virtio_gpu_fence *fence = NULL;
	struct ww_acquire_ctx ticket;
	struct virtio_gpu_resource_create_3d rc_3d;

	if (vgdev->has_virgl_3d == false) {
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	INIT_LIST_HEAD(&validate_list);
	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

	virtio_gpu_resource_id_get(vgdev, &res_id);

	size = rc->size;

	/* allocate a single page size object */
	if (size == 0)
		size = PAGE_SIZE;

	qobj = virtio_gpu_alloc_object(dev, size, false, false);
	if (IS_ERR(qobj)) {
		ret = PTR_ERR(qobj);
		goto fail_id;
	}
	obj = &qobj->gem_base;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
					       rc->width, rc->height);

		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
	} else {
		/* use a gem reference since unref list undoes them */
		drm_gem_object_reference(&qobj->gem_base);
		mainbuf.bo = &qobj->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret) {
			DRM_DEBUG("failed to validate\n");
			goto fail_unref;
		}

		rc_3d.resource_id = cpu_to_le32(res_id);
		rc_3d.target = cpu_to_le32(rc->target);
		rc_3d.format = cpu_to_le32(rc->format);
		rc_3d.bind = cpu_to_le32(rc->bind);
		rc_3d.width = cpu_to_le32(rc->width);
		rc_3d.height = cpu_to_le32(rc->height);
		rc_3d.depth = cpu_to_le32(rc->depth);
		rc_3d.array_size = cpu_to_le32(rc->array_size);
		rc_3d.last_level = cpu_to_le32(rc->last_level);
		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
		rc_3d.flags = cpu_to_le32(rc->flags);

		virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
		if (ret) {
			ttm_eu_backoff_reservation(&ticket, &validate_list);
			goto fail_unref;
		}
		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
	}

	qobj->hw_res_handle = res_id;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		if (vgdev->has_virgl_3d) {
			virtio_gpu_unref_list(&validate_list);
			fence_put(&fence->f);
		}
		goto fail_unref;
	}
	drm_gem_object_unreference_unlocked(obj);

	rc->res_handle = res_id; /* similar to a VM address */
	rc->bo_handle = handle;

	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		fence_put(&fence->f);
	}
	return 0;

fail_unref:
	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		fence_put(&fence->f);
	}
//	drm_gem_object_handle_unreference_unlocked(obj);
fail_id:
	virtio_gpu_resource_id_put(vgdev, res_id);
	return ret;
}

static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(dev, file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->gem_base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, qobj->hw_res_handle,
		 vfpriv->ctx_id, offset, args->level,
		 &box, &fence);
	reservation_object_add_excl_fence(qobj->tbo.resv,
					  &fence->f);

	fence_put(&fence->f);
out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_box box;
	int ret;
	u32 offset = args->offset;

	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, qobj->hw_res_handle, offset,
			 box.w, box.h, box.x, box.y, NULL);
	} else {
		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev, qobj->hw_res_handle,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &box, &fence);
		reservation_object_add_excl_fence(qobj->tbo.resv,
						  &fence->f);
		fence_put(&fence->f);
	}

out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	int ret;
	bool nowait = false;

	gobj = drm_gem_object_lookup(dev, file, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		nowait = true;
	ret = virtio_gpu_object_wait(qobj, nowait);

	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	int size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	size = vgdev->capsets[found_valid].max_size;
	if (args->size > size) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			ptr = cache_ent->caps_cache;
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);

	ptr = cache_ent->caps_cache;

copy_exit:
	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
		return -EFAULT;

	return 0;
}

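/*
 * Illustrative only: a rough userspace sketch of the capset query flow
 * implemented above (look up the capset, then copy the cached or freshly
 * fetched data back to the caller).  It assumes struct drm_virtgpu_get_caps
 * and DRM_IOCTL_VIRTGPU_GET_CAPS from virtgpu_drm.h plus libdrm's
 * drmIoctl(); fetch_caps() and its parameters are hypothetical names.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <xf86drm.h>
 *	#include "virtgpu_drm.h"	// uapi header
 *
 *	static int fetch_caps(int fd, uint32_t set_id, uint32_t set_ver,
 *			      void *buf, uint32_t buf_size)
 *	{
 *		struct drm_virtgpu_get_caps gc;
 *
 *		memset(&gc, 0, sizeof(gc));
 *		gc.cap_set_id = set_id;
 *		gc.cap_set_ver = set_ver;
 *		gc.addr = (uintptr_t)buf;	// kernel copies the caps here
 *		gc.size = buf_size;		// must not exceed the capset's max_size
 *		return drmIoctl(fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &gc);
 *	}
 */
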
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	/* make the transfers async to the main ring? - not sure, can we
	 * thread these in the underlying GL?
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};