/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
        struct ttm_prime_object prime;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

struct vmw_stream {
        struct vmw_resource res;
        uint32_t stream_id;
};

struct vmw_user_stream {
        struct ttm_base_object base;
        struct vmw_stream stream;
};


static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
        .res_type = vmw_res_stream,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "video streams",
        .backup_placement = NULL,
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
        return kref_get_unless_zero(&res->kref) ? res : NULL;
}

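/*
 * Example (illustrative sketch, not part of the original file): a caller
 * that temporarily needs its own reference typically pairs the reference
 * helpers with vmw_resource_unreference() like this; the local name "tmp"
 * is hypothetical.
 *
 *      struct vmw_resource *tmp = vmw_resource_reference(res);
 *
 *      ... use tmp while the extra reference is held ...
 *
 *      vmw_resource_unreference(&tmp);
 */
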
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        write_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        res->avail = false;
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, false, 0);
                if (!list_empty(&res->mob_head) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
                vmw_dmabuf_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL)) {
                res->hw_destroy(res);
                mutex_lock(&dev_priv->binding_mutex);
                vmw_context_binding_res_list_kill(&res->binding_head);
                mutex_unlock(&dev_priv->binding_mutex);
        }

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);

        if (id != -1)
                idr_remove(idr, id);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;
        struct vmw_private *dev_priv = res->dev_priv;

        *p_res = NULL;
        write_lock(&dev_priv->resource_lock);
        kref_put(&res->kref, vmw_resource_release);
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        write_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        write_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->avail = false;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
        INIT_LIST_HEAD(&res->binding_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
                           void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
                                         struct idr *idr, int id)
{
        struct vmw_resource *res;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(idr, id);
        if (res && res->avail)
                kref_get(&res->kref);
        else
                res = NULL;
        read_unlock(&dev_priv->resource_lock);

        if (unlikely(res == NULL))
                return NULL;

        return res;
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);

        read_lock(&dev_priv->resource_lock);
        if (!res->avail || res->res_free != converter->res_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

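/*
 * Example (illustrative sketch, not part of the original file): a caller
 * that needs a surface from a user-space handle does the lookup and drops
 * the reference again once done, mirroring vmw_user_lookup_handle() below.
 * The local names are hypothetical.
 *
 *      struct vmw_resource *res;
 *      int ret;
 *
 *      ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *                                            user_surface_converter, &res);
 *      if (unlikely(ret != 0))
 *              return ret;
 *
 *      ... use vmw_res_to_srf(res) ...
 *
 *      vmw_resource_unreference(&res);
 */
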
/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed to by out_surf and out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_dma_buffer **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
        return ret;
}

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
                                  bool user)
{
        static size_t struct_size, user_struct_size;
        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

        if (unlikely(struct_size == 0)) {
                size_t backend_size = ttm_round_pot(vmw_tt_size);

                struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_dma_buffer));
                user_struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
        }

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                page_array_size +=
                        ttm_round_pot(num_pages * sizeof(dma_addr_t));

        return ((user) ? user_struct_size : struct_size) +
                page_array_size;
}

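/*
 * Worked example (illustrative, not part of the original file), assuming
 * 4 KiB pages and 8-byte pointers/dma_addr_t: a 1 MiB request gives
 * num_pages = 256 and page_array_size = ttm_round_pot(256 * 8) = 2048 bytes.
 * With vmw_dma_alloc_coherent another ttm_round_pot(256 * sizeof(dma_addr_t))
 * = 2048 bytes is added, and the fixed struct_size or user_struct_size term
 * (backend size plus the rounded struct size) comes on top.
 */
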
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

        ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
        bool user = (bo_free == &vmw_user_dmabuf_destroy);

        BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

        acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->res_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          (user) ? ttm_bo_type_device :
                          ttm_bo_type_kernel, placement,
                          0, interruptible,
                          NULL, acc_size, NULL, bo_free);
        return ret;
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
                                            enum ttm_ref_type ref_type)
{
        struct vmw_user_dma_buffer *user_bo;
        user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

        switch (ref_type) {
        case TTM_REF_SYNCCPU_WRITE:
                ttm_bo_synccpu_write_release(&user_bo->dma.base);
                break;
        default:
                BUG();
        }
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                          struct ttm_object_file *tfile,
                          uint32_t size,
                          bool shareable,
                          uint32_t *handle,
                          struct vmw_dma_buffer **p_dma_buf)
{
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(user_bo == NULL)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
                              (dev_priv->has_mob) ?
                              &vmw_sys_placement :
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
                return ret;

        tmp = ttm_bo_reference(&user_bo->dma.base);
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
                                    &vmw_user_dmabuf_release,
                                    &vmw_user_dmabuf_ref_obj_release);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
        }

        *p_dma_buf = &user_bo->dma;
        *handle = user_bo->prime.base.hash.key;

out_no_base_object:
        return ret;
}

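/*
 * Example (illustrative sketch, not part of the original file): allocating a
 * user dma buffer and dropping the local reference once the handle has been
 * handed back to user-space, mirroring vmw_dmabuf_alloc_ioctl() below. The
 * local names are hypothetical.
 *
 *      struct vmw_dma_buffer *dma_buf;
 *      uint32_t handle;
 *      int ret;
 *
 *      ret = vmw_user_dmabuf_alloc(dev_priv, tfile, size, false,
 *                                  &handle, &dma_buf);
 *      if (unlikely(ret != 0))
 *              return ret;
 *
 *      ... copy "handle" to user-space ...
 *
 *      vmw_dmabuf_unreference(&dma_buf);
 */
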
/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                                  struct ttm_object_file *tfile)
{
        struct vmw_user_dma_buffer *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_dma_buffer(bo);
        return (vmw_user_bo->prime.base.tfile == tfile ||
                vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
                                        struct ttm_object_file *tfile,
                                        uint32_t flags)
{
        struct ttm_buffer_object *bo = &user_bo->dma.base;
        bool existed;
        int ret;

        if (flags & drm_vmw_synccpu_allow_cs) {
                struct ttm_bo_device *bdev = bo->bdev;

                spin_lock(&bdev->fence_lock);
                ret = ttm_bo_wait(bo, false, true,
                                  !!(flags & drm_vmw_synccpu_dontblock));
                spin_unlock(&bdev->fence_lock);
                return ret;
        }

        ret = ttm_bo_synccpu_write_grab
                (bo, !!(flags & drm_vmw_synccpu_dontblock));
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                 TTM_REF_SYNCCPU_WRITE, &existed);
        if (ret != 0 || existed)
                ttm_bo_synccpu_write_release(&user_bo->dma.base);

        return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
                                           struct ttm_object_file *tfile,
                                           uint32_t flags)
{
        if (!(flags & drm_vmw_synccpu_allow_cs))
                return ttm_ref_object_base_unref(tfile, handle,
                                                 TTM_REF_SYNCCPU_WRITE);

        return 0;
}

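/*
 * Example (illustrative sketch, not part of the original file): how a
 * blocking (non-allow_cs) grab and its matching release pair up. The local
 * names are hypothetical; in this file the two helpers are only called from
 * vmw_user_dmabuf_synccpu_ioctl() below.
 *
 *      ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile,
 *                                         drm_vmw_synccpu_read |
 *                                         drm_vmw_synccpu_write);
 *      if (unlikely(ret != 0))
 *              return ret;
 *
 *      ... CPU reads/writes the buffer contents ...
 *
 *      ret = vmw_user_dmabuf_synccpu_release(handle, tfile,
 *                                            drm_vmw_synccpu_read |
 *                                            drm_vmw_synccpu_write);
 */
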
/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
{
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
        struct vmw_dma_buffer *dma_buf;
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
                               drm_vmw_synccpu_dontblock |
                               drm_vmw_synccpu_allow_cs)) != 0) {
                DRM_ERROR("Illegal synccpu flags.\n");
                return -EINVAL;
        }

        switch (arg->op) {
        case drm_vmw_synccpu_grab:
                ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
                if (unlikely(ret != 0))
                        return ret;

                user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
                                       dma);
                ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
                vmw_dmabuf_unreference(&dma_buf);
                if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
                             ret != -EBUSY)) {
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        case drm_vmw_synccpu_release:
                ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
                                                      arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        default:
                DRM_ERROR("Invalid synccpu operation.\n");
                return -EINVAL;
        }

        return 0;
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_dma_buffer *dma_buf;
        uint32_t handle;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    req->size, false, &handle, &dma_buf);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
        ttm_read_unlock(&vmaster->lock);

        return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                              struct vmw_dma_buffer *dma_buf,
                              uint32_t *handle)
{
        struct vmw_user_dma_buffer *user_bo;

        if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
                return -EINVAL;

        user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

        *handle = user_bo->prime.base.hash.key;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL);
}

static void vmw_stream_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_stream *stream;
        int ret;

        DRM_INFO("%s: unref\n", __func__);
        stream = container_of(res, struct vmw_stream, res);

        ret = vmw_overlay_unref(dev_priv, stream->stream_id);
        WARN_ON(ret != 0);
}

*dev_priv
,
779 struct vmw_stream
*stream
,
780 void (*res_free
) (struct vmw_resource
*res
))
782 struct vmw_resource
*res
= &stream
->res
;
785 ret
= vmw_resource_init(dev_priv
, res
, false, res_free
,
788 if (unlikely(ret
!= 0)) {
789 if (res_free
== NULL
)
792 res_free(&stream
->res
);
796 ret
= vmw_overlay_claim(dev_priv
, &stream
->stream_id
);
798 vmw_resource_unreference(&res
);
802 DRM_INFO("%s: claimed\n", __func__
);
804 vmw_resource_activate(&stream
->res
, vmw_stream_destroy
);
static void vmw_user_stream_free(struct vmw_resource *res)
{
        struct vmw_user_stream *stream =
            container_of(res, struct vmw_user_stream, stream.res);
        struct vmw_private *dev_priv = res->dev_priv;

        ttm_base_object_kfree(stream, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_stream *stream =
            container_of(base, struct vmw_user_stream, base);
        struct vmw_resource *res = &stream->stream.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
        int ret = 0;

        res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto out;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EINVAL;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of streams anyway.
         */

        if (unlikely(vmw_user_stream_size == 0))
                vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_stream_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for stream"
                                  " creation.\n");
                goto out_unlock;
        }

        stream = kmalloc(sizeof(*stream), GFP_KERNEL);
        if (unlikely(stream == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_stream_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &stream->stream.res;
        stream->base.shareable = false;
        stream->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
                                   &vmw_user_stream_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->stream_id = res->id;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t *inout_id, struct vmw_resource **out)
{
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        int ret;

        res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
                                  *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto err_ref;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EPERM;
                goto err_ref;
        }

        *inout_id = stream->stream.stream_id;
        *out = res;
        return 0;
err_ref:
        vmw_resource_unreference(&res);
        return ret;
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        struct vmw_dma_buffer *dma_buf;
        int ret;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    args->size, false, &args->handle,
                                    &dma_buf);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_dma_buffer *out_buf;
        int ret;

        ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_dmabuf_unreference(&out_buf);
        return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_dma_buffer *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(backup == NULL))
                return -ENOMEM;

        ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
                              res->func->backup_placement,
                              interruptible,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        res->backup = backup;

out_no_dmabuf:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && list_empty(&res->mob_head) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        list_add_tail(&res->mob_head, &res->backup->res_list);
        }

        /*
         * Only do this on write operations, and move to
         * vmw_resource_unreserve if it can be called after
         * backup buffers have been unreserved. Otherwise
         * sort out locking.
         */
        res->res_dirty = true;

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            struct vmw_dma_buffer *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (new_backup && new_backup != res->backup) {

                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
                        vmw_dmabuf_unreference(&res->backup);
                }

                res->backup = vmw_dmabuf_reference(new_backup);
                lockdep_assert_held(&new_backup->base.resv->lock.base);
                list_add_tail(&res->mob_head, &new_backup->res_list);
        }
        if (new_backup)
                res->backup_offset = new_backup_offset;

        if (!res->func->may_evict || res->id == -1)
                return;

        write_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(NULL, &val_list);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && list_empty(&res->mob_head))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              true, false);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
        ttm_bo_unref(&val_buf->bo);
        if (backup_dirty)
                vmw_dmabuf_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        write_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, true);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(NULL, &val_list);
        ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || !list_empty(&res->mob_head)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        return ret;
                list_del_init(&res->mob_head);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
        vmw_resource_backoff_reservation(&val_buf);

        return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (likely(!res->func->may_evict))
                return 0;

        val_buf.bo = NULL;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf);
                if (likely(ret != -EBUSY))
                        break;

                write_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        write_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, true);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
                vmw_dmabuf_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}

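/*
 * Example (illustrative sketch, not part of the original file): the typical
 * lifecycle of an evictable resource around command submission, as driven by
 * the execbuf code. Backup-buffer reservation and fencing are omitted; the
 * error-handling label is hypothetical.
 *
 *      ret = vmw_resource_reserve(res, false);
 *      if (unlikely(ret != 0))
 *              return ret;
 *
 *      ret = vmw_resource_validate(res);
 *      if (unlikely(ret != 0))
 *              goto out_unreserve;
 *
 *      ... submit device commands referencing res->id ...
 *
 * out_unreserve:
 *      vmw_resource_unreserve(res, NULL, 0);
 */
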
/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct vmw_fence_obj *old_fence_obj;
        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL)
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        else
                driver->sync_obj_ref(fence);

        spin_lock(&bdev->fence_lock);

        old_fence_obj = bo->sync_obj;
        bo->sync_obj = fence;

        spin_unlock(&bdev->fence_lock);

        if (old_fence_obj)
                vmw_fence_obj_unreference(&old_fence_obj);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo:             The TTM buffer object about to move.
 * @mem:            The struct ttm_mem_reg indicating to what memory
 *                  region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo:res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                              struct ttm_mem_reg *mem)
{
        struct vmw_dma_buffer *dma_buf;

        if (mem == NULL)
                return;

        if (bo->destroy != vmw_dmabuf_bo_free &&
            bo->destroy != vmw_user_dmabuf_destroy)
                return;

        dma_buf = container_of(bo, struct vmw_dma_buffer, base);

        if (mem->mem_type != VMW_PL_MOB) {
                struct vmw_resource *res, *n;
                struct ttm_bo_device *bdev = bo->bdev;
                struct ttm_validate_buffer val_buf;

                val_buf.bo = bo;

                list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

                        if (unlikely(res->func->unbind == NULL))
                                continue;

                        (void) res->func->unbind(res, true, &val_buf);
                        res->backup_dirty = true;
                        res->res_dirty = false;
                        list_del_init(&res->mob_head);
                }

                spin_lock(&bdev->fence_lock);
                (void) ttm_bo_wait(bo, false, false, false);
                spin_unlock(&bdev->fence_lock);
        }
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;

        do {
                write_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, false);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}