/* drivers/gpu/drm/virtio/virtgpu_ioctl.c */

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
				    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
				    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
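
/*
 * Lazily create the per-file host rendering context on first use. The
 * context is tagged with the current task's comm name so it can be
 * identified in host-side debug output.
 */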
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	char dbgname[TASK_COMM_LEN];

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created)
		goto out_unlock;

	get_task_comm(dbgname, current);
	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
				      strlen(dbgname), dbgname);
	vfpriv->context_created = true;

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
}
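
/*
 * VIRTGPU_MAP: hand back the fake mmap offset for a dumb buffer, to be
 * passed to mmap(2) on the DRM fd.
 */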
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
		return -EINVAL;

	exbuf->fence_fd = -1;

	virtio_gpu_create_context(dev, file);
	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);
		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unused_fd;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_memdup;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_unresv;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			/* unlock the reservations too, not just free buf */
			goto out_unresv;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	dma_fence_put(&out_fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_memdup:
	kvfree(buf);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}
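
/*
 * VIRTGPU_GETPARAM: report device capabilities (3D support, blob
 * resources, host-visible memory, ...) as individual integer params.
 */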
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	case VIRTGPU_PARAM_RESOURCE_BLOB:
		value = vgdev->has_resource_blob ? 1 : 0;
		break;
	case VIRTGPU_PARAM_HOST_VISIBLE:
		value = vgdev->has_host_visible ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CROSS_DEVICE:
		value = vgdev->has_resource_assign_uuid ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}
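
/*
 * VIRTGPU_RESOURCE_CREATE: create a host-side resource plus the guest
 * GEM object that backs it. Without virgl 3D support, only simple 2D
 * resources are allowed.
 */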
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d) {
		virtio_gpu_create_context(dev, file);
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	} else {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	/* allocate a single page size object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}
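
/*
 * VIRTGPU_RESOURCE_INFO: look up size, host resource handle and blob
 * memory type for a GEM handle.
 */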
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	if (qobj->host3d_blob || qobj->guest_blob)
		ri->blob_mem = qobj->blob_mem;

	drm_gem_object_put(gobj);
	return 0;
}
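
/*
 * VIRTGPU_TRANSFER_FROM_HOST: read resource contents back from the host,
 * with a fence attached to the object so completion can be waited on.
 */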
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
		ret = -EINVAL;
		goto err_put_free;
	}

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
		 args->layer_stride, &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}
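
/*
 * VIRTGPU_TRANSFER_TO_HOST: flush guest-side resource contents to the
 * host; 2D transfers are unfenced, 3D transfers take a fence.
 */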
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);

		if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
			ret = -EINVAL;
			goto err_put_free;
		}

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
			 args->stride, args->layer_stride, &args->box, objs,
			 fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}
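
/*
 * VIRTGPU_WAIT: wait (or poll, with VIRTGPU_WAIT_NOWAIT) for the fences
 * on a GEM object's reservation to signal.
 */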
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled_rcu(obj->resv, true);
	} else {
		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
						timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);
	return ret;
}
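
/*
 * VIRTGPU_GET_CAPS: copy a capability set to userspace, fetching it from
 * the host on first use and serving later queries from the cache.
 */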
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned int size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* the is_valid check must precede the copy of the cache entry */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}
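
/*
 * Validate a RESOURCE_CREATE_BLOB request against the device's feature
 * bits and fill in the object creation parameters.
 */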
static int verify_blob(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_fpriv *vfpriv,
		       struct virtio_gpu_object_params *params,
		       struct drm_virtgpu_resource_create_blob *rc_blob,
		       bool *guest_blob, bool *host3d_blob)
{
	if (!vgdev->has_resource_blob)
		return -EINVAL;

	if ((rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK) ||
	    !rc_blob->blob_flags)
		return -EINVAL;

	if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		if (!vgdev->has_resource_assign_uuid)
			return -EINVAL;
	}

	switch (rc_blob->blob_mem) {
	case VIRTGPU_BLOB_MEM_GUEST:
		*guest_blob = true;
		break;
	case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
		*guest_blob = true;
		fallthrough;
	case VIRTGPU_BLOB_MEM_HOST3D:
		*host3d_blob = true;
		break;
	default:
		return -EINVAL;
	}

	if (*host3d_blob) {
		if (!vgdev->has_virgl_3d)
			return -EINVAL;

		/* Must be dword aligned. */
		if (rc_blob->cmd_size % 4 != 0)
			return -EINVAL;

		params->ctx_id = vfpriv->ctx_id;
		params->blob_id = rc_blob->blob_id;
	} else {
		if (rc_blob->blob_id != 0)
			return -EINVAL;

		if (rc_blob->cmd_size != 0)
			return -EINVAL;
	}

	params->blob_mem = rc_blob->blob_mem;
	params->size = rc_blob->size;
	params->blob = true;
	params->blob_flags = rc_blob->blob_flags;
	return 0;
}
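
/*
 * VIRTGPU_RESOURCE_CREATE_BLOB: create a blob resource, backed either by
 * guest pages or by host (VRAM-like) memory, optionally preceded by a
 * command buffer submission that sets up the blob on the host side.
 */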
static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
						 void *data,
						 struct drm_file *file)
{
	int ret = 0;
	uint32_t handle = 0;
	bool guest_blob = false;
	bool host3d_blob = false;
	struct drm_gem_object *obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_resource_create_blob *rc_blob = data;

	if (verify_blob(vgdev, vfpriv, &params, rc_blob,
			&guest_blob, &host3d_blob))
		return -EINVAL;

	if (vgdev->has_virgl_3d)
		virtio_gpu_create_context(dev, file);

	if (rc_blob->cmd_size) {
		void *buf;

		buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
				  rc_blob->cmd_size);

		if (IS_ERR(buf))
			return PTR_ERR(buf);

		virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
				      vfpriv->ctx_id, NULL, NULL);
	}

	if (guest_blob)
		ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
	else if (!guest_blob && host3d_blob)
		ret = virtio_gpu_vram_create(vgdev, &params, &bo);
	else
		return -EINVAL;

	if (ret < 0)
		return ret;

	bo->guest_blob = guest_blob;
	bo->host3d_blob = host3d_blob;
	bo->blob_mem = rc_blob->blob_mem;
	bo->blob_flags = rc_blob->blob_flags;

	obj = &bo->base.base;
	if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
		if (ret) {
			drm_gem_object_release(obj);
			return ret;
		}
	}

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc_blob->res_handle = bo->hw_res_handle;
	rc_blob->bo_handle = handle;

	return 0;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL?
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
			  virtio_gpu_resource_create_blob_ioctl,
			  DRM_RENDER_ALLOW),
};
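
/*
 * Illustrative userspace flow (a minimal sketch, not part of this file;
 * the render node path is an assumption and all error handling is
 * elided): create a 2D resource, ask VIRTGPU_MAP for its mmap offset,
 * then map the backing pages through the DRM fd.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <drm/virtgpu_drm.h>
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR);
 *	struct drm_virtgpu_resource_create rc = {
 *		.target = 2,	// 2D texture, the only target allowed without virgl
 *		.format = 1,	// VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM
 *		.width = 64, .height = 64,
 *		.size = 64 * 64 * 4,
 *	};
 *	ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &rc);
 *
 *	struct drm_virtgpu_map map = { .handle = rc.bo_handle };
 *	ioctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map);
 *	void *ptr = mmap(NULL, rc.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */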