/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

#define VMW_COMPAT_SHADER_HT_ORDER 12

struct vmw_shader {
	struct vmw_resource res;
	SVGA3dShaderType type;
	uint32_t size;
};

struct vmw_user_shader {
	struct ttm_base_object base;
	struct vmw_shader shader;
};

/**
 * enum vmw_compat_shader_state - Staging state for compat shaders
 */
enum vmw_compat_shader_state {
	VMW_COMPAT_COMMITED,
	VMW_COMPAT_ADD,
	VMW_COMPAT_DEL
};
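
/*
 * Shaders staged as VMW_COMPAT_ADD or VMW_COMPAT_DEL live on the caller's
 * staging list until vmw_compat_shaders_commit() or
 * vmw_compat_shaders_revert() is called. Only VMW_COMPAT_COMMITED entries
 * sit on the manager's own list.
 */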

/**
 * struct vmw_compat_shader - Metadata for compat shaders.
 *
 * @handle: The TTM handle of the guest backed shader.
 * @tfile: The struct ttm_object_file the guest backed shader is registered
 * with.
 * @hash: Hash item for lookup.
 * @head: List head for staging lists or the compat shader manager list.
 * @state: Staging state.
 *
 * The structure is protected by the cmdbuf lock.
 */
struct vmw_compat_shader {
	u32 handle;
	struct ttm_object_file *tfile;
	struct drm_hash_item hash;
	struct list_head head;
	enum vmw_compat_shader_state state;
};

/**
 * struct vmw_compat_shader_manager - Compat shader manager.
 *
 * @shaders: Hash table containing staged and committed compat shaders
 * @list: List of committed shaders.
 * @dev_priv: Pointer to a device private structure.
 *
 * @shaders and @list are protected by the cmdbuf mutex for now.
 */
struct vmw_compat_shader_manager {
	struct drm_open_hash shaders;
	struct list_head list;
	struct vmw_private *dev_priv;
};
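
/*
 * Entries in the manager's hash table are keyed by
 * user_key | (shader_type << 24), so a user key only needs to be unique
 * within its shader type.
 */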

static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);

static int vmw_gb_shader_create(struct vmw_resource *res);
static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);

static uint64_t vmw_user_shader_size;

static const struct vmw_user_resource_conv user_shader_conv = {
	.object_type = VMW_RES_SHADER,
	.base_obj_to_res = vmw_user_shader_base_to_res,
	.res_free = vmw_user_shader_free
};

const struct vmw_user_resource_conv *user_shader_converter =
	&user_shader_conv;

static const struct vmw_res_func vmw_gb_shader_func = {
	.res_type = vmw_res_shader,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed shaders",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_shader_create,
	.destroy = vmw_gb_shader_destroy,
	.bind = vmw_gb_shader_bind,
	.unbind = vmw_gb_shader_unbind
};

static inline struct vmw_shader *
vmw_res_to_shader(struct vmw_resource *res)
{
	return container_of(res, struct vmw_shader, res);
}

static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
	(void) vmw_gb_shader_destroy(res);
}

static int vmw_gb_shader_init(struct vmw_private *dev_priv,
			      struct vmw_resource *res,
			      uint32_t size,
			      uint64_t offset,
			      SVGA3dShaderType type,
			      struct vmw_dma_buffer *byte_code,
			      void (*res_free) (struct vmw_resource *res))
{
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_shader_func);

	if (unlikely(ret != 0)) {
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	res->backup_size = size;
	if (byte_code) {
		res->backup = vmw_dmabuf_reference(byte_code);
		res->backup_offset = offset;
	}
	shader->size = size;
	shader->type = type;

	vmw_resource_activate(res, vmw_hw_shader_destroy);
	return 0;
}

static int vmw_gb_shader_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBShader body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a shader id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.type = shader->type;
	cmd->body.sizeInBytes = shader->size;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
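	/* For buffers placed in VMW_PL_MOB, mem.start holds the MOB id. */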
	cmd->body.mobid = bo->mem.start;
	cmd->body.offsetInBytes = 0;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct vmw_fence_obj *fence;

	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
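	/* Unbinding is done by re-binding the shader to an invalid MOB id. */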
	cmd->body.mobid = SVGA3D_INVALID_ID;
	cmd->body.offsetInBytes = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

static int vmw_gb_shader_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBShader body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;
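
	/* Scrub any context bindings still referencing this shader. */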
	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_res_list_scrub(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "destruction.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/**
 * User-space shader management:
 */

static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_shader, base)->
		 shader.res);
}

static void vmw_user_shader_free(struct vmw_resource *res)
{
	struct vmw_user_shader *ushader =
		container_of(res, struct vmw_user_shader, shader.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ushader, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_shader_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_resource *res = vmw_user_shader_base_to_res(base);

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->handle,
					 TTM_REF_USAGE);
}

int vmw_shader_alloc(struct vmw_private *dev_priv,
		     struct vmw_dma_buffer *buffer,
		     size_t shader_size,
		     size_t offset,
		     SVGA3dShaderType shader_type,
		     struct ttm_object_file *tfile,
		     u32 *handle)
{
	struct vmw_user_shader *ushader;
	struct vmw_resource *res, *tmp;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by maximum number of shaders anyway.
	 */
	if (unlikely(vmw_user_shader_size == 0))
		vmw_user_shader_size =
			ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_shader_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for shader "
				  "creation.\n");
		goto out;
	}

	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
	if (unlikely(ushader == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_shader_size);
		ret = -ENOMEM;
		goto out;
	}

	res = &ushader->shader.res;
	ushader->base.shareable = false;
	ushader->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
				 offset, shader_type, buffer,
				 vmw_user_shader_free);
	if (unlikely(ret != 0))
		goto out;
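
	/*
	 * The extra reference taken here is handed over to the TTM base
	 * object and is dropped again in vmw_user_shader_base_release().
	 */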
	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &ushader->base, false,
				   VMW_RES_SHADER,
				   &vmw_user_shader_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	if (handle)
		*handle = ushader->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out:
	return ret;
}

int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_shader_create_arg *arg =
		(struct drm_vmw_shader_create_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_dma_buffer *buffer = NULL;
	SVGA3dShaderType shader_type;
	int ret;

	if (arg->buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
					     &buffer);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not find buffer for shader "
				  "creation.\n");
			return ret;
		}

		if ((u64)buffer->base.num_pages * PAGE_SIZE <
		    (u64)arg->size + (u64)arg->offset) {
			DRM_ERROR("Illegal buffer- or shader size.\n");
			ret = -EINVAL;
			goto out_bad_arg;
		}
	}

	switch (arg->shader_type) {
	case drm_vmw_shader_type_vs:
		shader_type = SVGA3D_SHADERTYPE_VS;
		break;
	case drm_vmw_shader_type_ps:
		shader_type = SVGA3D_SHADERTYPE_PS;
		break;
	case drm_vmw_shader_type_gs:
		shader_type = SVGA3D_SHADERTYPE_GS;
		break;
	default:
		DRM_ERROR("Illegal shader type.\n");
		ret = -EINVAL;
		goto out_bad_arg;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		goto out_bad_arg;

	ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
			       shader_type, tfile, &arg->shader_handle);

	ttm_read_unlock(&vmaster->lock);
out_bad_arg:
	vmw_dmabuf_unreference(&buffer);
	return ret;
}

/**
 * vmw_compat_shader_lookup - Look up a compat shader
 *
 * @man: Pointer to the compat shader manager.
 * @shader_type: The shader type which, combined with the user_key, identifies
 * the shader.
 * @user_key: On entry, this should be a pointer to the user_key.
 * On successful exit, it will contain the guest-backed shader's TTM handle.
 *
 * Returns 0 on success. Non-zero on failure, in which case the value pointed
 * to by @user_key is unmodified.
 */
int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
			     SVGA3dShaderType shader_type,
			     u32 *user_key)
{
	struct drm_hash_item *hash;
	int ret;
	unsigned long key = *user_key | (shader_type << 24);

	ret = drm_ht_find_item(&man->shaders, key, &hash);
	if (unlikely(ret != 0))
		return ret;

	*user_key = drm_hash_entry(hash, struct vmw_compat_shader,
				   hash)->handle;

	return 0;
}

/**
 * vmw_compat_shader_free - Free a compat shader.
 *
 * @man: Pointer to the compat shader manager.
 * @entry: Pointer to a struct vmw_compat_shader.
 *
 * Frees a struct vmw_compat_shader entry and drops its reference to the
 * guest backed shader.
 */
static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
				   struct vmw_compat_shader *entry)
{
	list_del(&entry->head);
	WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
	WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
					  TTM_REF_USAGE));
	kfree(entry);
}

/**
 * vmw_compat_shaders_commit - Commit a list of compat shader actions.
 *
 * @man: Pointer to the compat shader manager.
 * @list: Caller's list of compat shader actions.
 *
 * This function commits a list of compat shader additions or removals.
 * It is typically called when the execbuf ioctl call triggering these
 * actions has committed the fifo contents to the device.
 */
void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
			       struct list_head *list)
{
	struct vmw_compat_shader *entry, *next;

	list_for_each_entry_safe(entry, next, list, head) {
		list_del(&entry->head);
		switch (entry->state) {
		case VMW_COMPAT_ADD:
			entry->state = VMW_COMPAT_COMMITED;
			list_add_tail(&entry->head, &man->list);
			break;
		case VMW_COMPAT_DEL:
			ttm_ref_object_base_unref(entry->tfile, entry->handle,
						  TTM_REF_USAGE);
			kfree(entry);
			break;
		default:
			BUG();
			break;
		}
	}
}

/**
 * vmw_compat_shaders_revert - Revert a list of compat shader actions
 *
 * @man: Pointer to the compat shader manager.
 * @list: Caller's list of compat shader actions.
 *
 * This function reverts a list of compat shader additions or removals.
 * It is typically called when the execbuf ioctl call triggering these
 * actions failed for some reason, and the command stream was never
 * submitted.
 */
void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
			       struct list_head *list)
{
	struct vmw_compat_shader *entry, *next;
	int ret;

	list_for_each_entry_safe(entry, next, list, head) {
		switch (entry->state) {
		case VMW_COMPAT_ADD:
			vmw_compat_shader_free(man, entry);
			break;
		case VMW_COMPAT_DEL:
			ret = drm_ht_insert_item(&man->shaders, &entry->hash);
			list_del(&entry->head);
			list_add_tail(&entry->head, &man->list);
			entry->state = VMW_COMPAT_COMMITED;
			break;
		default:
			BUG();
			break;
		}
	}
}
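
/*
 * A minimal usage sketch (hypothetical caller, e.g. the execbuf path),
 * showing how lookups and additions are meant to be paired with
 * commit/revert once the corresponding command stream has been submitted
 * or abandoned:
 *
 *	LIST_HEAD(staged);
 *	u32 handle = user_key;
 *	int ret = 0;
 *
 *	if (vmw_compat_shader_lookup(man, shader_type, &handle) != 0)
 *		ret = vmw_compat_shader_add(man, user_key, bytecode,
 *					    shader_type, size, tfile, &staged);
 *	...
 *	if (ret == 0)
 *		vmw_compat_shaders_commit(man, &staged);
 *	else
 *		vmw_compat_shaders_revert(man, &staged);
 */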

/**
 * vmw_compat_shader_remove - Stage a compat shader for removal.
 *
 * @man: Pointer to the compat shader manager
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @shader_type: Shader type.
 * @list: Caller's list of staged shader actions.
 *
 * This function stages a compat shader for removal and removes the key from
 * the shader manager's hash table. If the shader was previously only staged
 * for addition it is completely removed (but the execbuf code may keep a
 * reference if it was bound to a context between addition and removal). If
 * it was previously committed to the manager, it is staged for removal.
 */
int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list)
{
	struct vmw_compat_shader *entry;
	struct drm_hash_item *hash;
	int ret;

	ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
			       &hash);
	if (likely(ret != 0))
		return -EINVAL;

	entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);

	switch (entry->state) {
	case VMW_COMPAT_ADD:
		vmw_compat_shader_free(man, entry);
		break;
	case VMW_COMPAT_COMMITED:
		(void) drm_ht_remove_item(&man->shaders, &entry->hash);
		list_del(&entry->head);
		entry->state = VMW_COMPAT_DEL;
		list_add_tail(&entry->head, list);
		break;
	default:
		BUG();
		break;
	}

	return 0;
}

/**
 * vmw_compat_shader_add - Create a compat shader and add the
 * key to the manager
 *
 * @man: Pointer to the compat shader manager
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @bytecode: Pointer to the bytecode of the shader.
 * @shader_type: Shader type.
 * @size: Size of the shader bytecode in bytes.
 * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
 * to be created with.
 * @list: Caller's list of staged shader actions.
 *
 * Note that only the key is added to the shader manager's hash table.
 * The shader is not yet added to the shader manager's list of shaders.
 */
int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
			  u32 user_key, const void *bytecode,
			  SVGA3dShaderType shader_type,
			  size_t size,
			  struct ttm_object_file *tfile,
			  struct list_head *list)
{
	struct vmw_dma_buffer *buf;
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	struct vmw_compat_shader *compat;
	u32 handle;
	int ret;

	if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
		return -EINVAL;

	/* Allocate and pin a DMA buffer */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (unlikely(buf == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
			      true, vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out;

	ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
	if (unlikely(ret != 0))
		goto no_reserve;

	/* Map and copy shader bytecode. */
	ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
			  &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(&buf->base);
		goto no_reserve;
	}

	memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
	WARN_ON(is_iomem);

	ttm_bo_kunmap(&map);
	ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
	WARN_ON(ret != 0);
	ttm_bo_unreserve(&buf->base);

	/* Create a guest-backed shader container backed by the dma buffer */
	ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
			       tfile, &handle);
	vmw_dmabuf_unreference(&buf);
	if (unlikely(ret != 0))
		goto no_reserve;

	/*
	 * Create a compat shader structure and stage it for insertion
	 * in the manager
	 */
	compat = kzalloc(sizeof(*compat), GFP_KERNEL);
	if (unlikely(compat == NULL)) {
		ret = -ENOMEM;
		goto no_compat;
	}

	compat->hash.key = user_key | (shader_type << 24);
	ret = drm_ht_insert_item(&man->shaders, &compat->hash);
	if (unlikely(ret != 0))
		goto out_invalid_key;

	compat->state = VMW_COMPAT_ADD;
	compat->handle = handle;
	compat->tfile = tfile;
	list_add_tail(&compat->head, list);

	return 0;

out_invalid_key:
	kfree(compat);
no_compat:
	ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
no_reserve:
out:
	return ret;
}

/**
 * vmw_compat_shader_man_create - Create a compat shader manager
 *
 * @dev_priv: Pointer to a device private structure.
 *
 * Typically done at file open time. If successful returns a pointer to a
 * compat shader manager. Otherwise returns an error pointer.
 */
struct vmw_compat_shader_manager *
vmw_compat_shader_man_create(struct vmw_private *dev_priv)
{
	struct vmw_compat_shader_manager *man;
	int ret;

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (man == NULL)
		return ERR_PTR(-ENOMEM);

	man->dev_priv = dev_priv;
	INIT_LIST_HEAD(&man->list);
	ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
	if (ret == 0)
		return man;

	kfree(man);
	return ERR_PTR(ret);
}

/**
 * vmw_compat_shader_man_destroy - Destroy a compat shader manager
 *
 * @man: Pointer to the shader manager to destroy.
 *
 * Typically done at file close time.
 */
void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
{
	struct vmw_compat_shader *entry, *next;

	mutex_lock(&man->dev_priv->cmdbuf_mutex);
	list_for_each_entry_safe(entry, next, &man->list, head)
		vmw_compat_shader_free(man, entry);

	mutex_unlock(&man->dev_priv->cmdbuf_mutex);
	kfree(man);
}