1 /**************************************************************************
3 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include "vmwgfx_drv.h"
29 #include "vmwgfx_reg.h"
30 #include <drm/ttm/ttm_bo_api.h>
31 #include <drm/ttm/ttm_placement.h>
32 #include "vmwgfx_so.h"
33 #include "vmwgfx_binding.h"
35 #define VMW_RES_HT_ORDER 12
38 * struct vmw_resource_relocation - Relocation info for resources
40 * @head: List head for the software context's relocation list.
41 * @res: Non-ref-counted pointer to the resource.
42 * @offset: Offset of 4 byte entries into the command buffer where the
43 * id that needs fixup is located.
45 struct vmw_resource_relocation
{
46 struct list_head head
;
47 const struct vmw_resource
*res
;
52 * struct vmw_resource_val_node - Validation info for resources
54 * @head: List head for the software context's resource list.
55 * @hash: Hash entry for quick resouce to val_node lookup.
56 * @res: Ref-counted pointer to the resource.
57 * @switch_backup: Boolean whether to switch backup buffer on unreserve.
58 * @new_backup: Refcounted pointer to the new backup buffer.
59 * @staged_bindings: If @res is a context, tracks bindings set up during
60 * the command batch. Otherwise NULL.
61 * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
62 * @first_usage: Set to true the first time the resource is referenced in
64 * @switching_backup: The command stream provides a new backup buffer for a
66 * @no_buffer_needed: This means @switching_backup is true on first buffer
67 * reference. So resource reservation does not need to allocate a backup
68 * buffer for the resource.
70 struct vmw_resource_val_node
{
71 struct list_head head
;
72 struct drm_hash_item hash
;
73 struct vmw_resource
*res
;
74 struct vmw_dma_buffer
*new_backup
;
75 struct vmw_ctx_binding_state
*staged_bindings
;
76 unsigned long new_backup_offset
;
78 u32 switching_backup
: 1;
79 u32 no_buffer_needed
: 1;
83 * struct vmw_cmd_entry - Describe a command for the verifier
85 * @user_allow: Whether allowed from the execbuf ioctl.
86 * @gb_disable: Whether disabled if guest-backed objects are available.
87 * @gb_enable: Whether enabled iff guest-backed objects are available.
89 struct vmw_cmd_entry
{
90 int (*func
) (struct vmw_private
*, struct vmw_sw_context
*,
97 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
98 [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
99 (_gb_disable), (_gb_enable)}
101 static int vmw_resource_context_res_add(struct vmw_private
*dev_priv
,
102 struct vmw_sw_context
*sw_context
,
103 struct vmw_resource
*ctx
);
104 static int vmw_translate_mob_ptr(struct vmw_private
*dev_priv
,
105 struct vmw_sw_context
*sw_context
,
107 struct vmw_dma_buffer
**vmw_bo_p
);
108 static int vmw_bo_to_validate_list(struct vmw_sw_context
*sw_context
,
109 struct vmw_dma_buffer
*vbo
,
110 bool validate_as_mob
,
111 uint32_t *p_val_node
);
115 * vmw_resources_unreserve - unreserve resources previously reserved for
116 * command submission.
118 * @sw_context: pointer to the software context
119 * @backoff: Whether command submission failed.
121 static void vmw_resources_unreserve(struct vmw_sw_context
*sw_context
,
124 struct vmw_resource_val_node
*val
;
125 struct list_head
*list
= &sw_context
->resource_list
;
127 if (sw_context
->dx_query_mob
&& !backoff
)
128 vmw_context_bind_dx_query(sw_context
->dx_query_ctx
,
129 sw_context
->dx_query_mob
);
131 list_for_each_entry(val
, list
, head
) {
132 struct vmw_resource
*res
= val
->res
;
134 (backoff
) ? false : val
->switching_backup
;
137 * Transfer staged context bindings to the
138 * persistent context binding tracker.
140 if (unlikely(val
->staged_bindings
)) {
142 vmw_binding_state_commit
143 (vmw_context_binding_state(val
->res
),
144 val
->staged_bindings
);
147 if (val
->staged_bindings
!= sw_context
->staged_bindings
)
148 vmw_binding_state_free(val
->staged_bindings
);
150 sw_context
->staged_bindings_inuse
= false;
151 val
->staged_bindings
= NULL
;
153 vmw_resource_unreserve(res
, switch_backup
, val
->new_backup
,
154 val
->new_backup_offset
);
155 vmw_dmabuf_unreference(&val
->new_backup
);
160 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
161 * added to the validate list.
163 * @dev_priv: Pointer to the device private:
164 * @sw_context: The validation context:
165 * @node: The validation node holding this context.
167 static int vmw_cmd_ctx_first_setup(struct vmw_private
*dev_priv
,
168 struct vmw_sw_context
*sw_context
,
169 struct vmw_resource_val_node
*node
)
173 ret
= vmw_resource_context_res_add(dev_priv
, sw_context
, node
->res
);
174 if (unlikely(ret
!= 0))
177 if (!sw_context
->staged_bindings
) {
178 sw_context
->staged_bindings
=
179 vmw_binding_state_alloc(dev_priv
);
180 if (IS_ERR(sw_context
->staged_bindings
)) {
181 DRM_ERROR("Failed to allocate context binding "
183 ret
= PTR_ERR(sw_context
->staged_bindings
);
184 sw_context
->staged_bindings
= NULL
;
189 if (sw_context
->staged_bindings_inuse
) {
190 node
->staged_bindings
= vmw_binding_state_alloc(dev_priv
);
191 if (IS_ERR(node
->staged_bindings
)) {
192 DRM_ERROR("Failed to allocate context binding "
194 ret
= PTR_ERR(node
->staged_bindings
);
195 node
->staged_bindings
= NULL
;
199 node
->staged_bindings
= sw_context
->staged_bindings
;
200 sw_context
->staged_bindings_inuse
= true;
209 * vmw_resource_val_add - Add a resource to the software context's
210 * resource list if it's not already on it.
212 * @sw_context: Pointer to the software context.
213 * @res: Pointer to the resource.
214 * @p_node On successful return points to a valid pointer to a
215 * struct vmw_resource_val_node, if non-NULL on entry.
217 static int vmw_resource_val_add(struct vmw_sw_context
*sw_context
,
218 struct vmw_resource
*res
,
219 struct vmw_resource_val_node
**p_node
)
221 struct vmw_private
*dev_priv
= res
->dev_priv
;
222 struct vmw_resource_val_node
*node
;
223 struct drm_hash_item
*hash
;
226 if (likely(drm_ht_find_item(&sw_context
->res_ht
, (unsigned long) res
,
228 node
= container_of(hash
, struct vmw_resource_val_node
, hash
);
229 node
->first_usage
= false;
230 if (unlikely(p_node
!= NULL
))
235 node
= kzalloc(sizeof(*node
), GFP_KERNEL
);
236 if (unlikely(node
== NULL
)) {
237 DRM_ERROR("Failed to allocate a resource validation "
242 node
->hash
.key
= (unsigned long) res
;
243 ret
= drm_ht_insert_item(&sw_context
->res_ht
, &node
->hash
);
244 if (unlikely(ret
!= 0)) {
245 DRM_ERROR("Failed to initialize a resource validation "
250 node
->res
= vmw_resource_reference(res
);
251 node
->first_usage
= true;
252 if (unlikely(p_node
!= NULL
))
255 if (!dev_priv
->has_mob
) {
256 list_add_tail(&node
->head
, &sw_context
->resource_list
);
260 switch (vmw_res_type(res
)) {
261 case vmw_res_context
:
262 case vmw_res_dx_context
:
263 list_add(&node
->head
, &sw_context
->ctx_resource_list
);
264 ret
= vmw_cmd_ctx_first_setup(dev_priv
, sw_context
, node
);
266 case vmw_res_cotable
:
267 list_add_tail(&node
->head
, &sw_context
->ctx_resource_list
);
270 list_add_tail(&node
->head
, &sw_context
->resource_list
);
278 * vmw_view_res_val_add - Add a view and the surface it's pointing to
279 * to the validation list
281 * @sw_context: The software context holding the validation list.
282 * @view: Pointer to the view resource.
284 * Returns 0 if success, negative error code otherwise.
286 static int vmw_view_res_val_add(struct vmw_sw_context
*sw_context
,
287 struct vmw_resource
*view
)
292 * First add the resource the view is pointing to, otherwise
293 * it may be swapped out when the view is validated.
295 ret
= vmw_resource_val_add(sw_context
, vmw_view_srf(view
), NULL
);
299 return vmw_resource_val_add(sw_context
, view
, NULL
);
303 * vmw_view_id_val_add - Look up a view and add it and the surface it's
304 * pointing to to the validation list.
306 * @sw_context: The software context holding the validation list.
307 * @view_type: The view type to look up.
308 * @id: view id of the view.
310 * The view is represented by a view id and the DX context it's created on,
311 * or scheduled for creation on. If there is no DX context set, the function
312 * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
314 static int vmw_view_id_val_add(struct vmw_sw_context
*sw_context
,
315 enum vmw_view_type view_type
, u32 id
)
317 struct vmw_resource_val_node
*ctx_node
= sw_context
->dx_ctx_node
;
318 struct vmw_resource
*view
;
322 DRM_ERROR("DX Context not set.\n");
326 view
= vmw_view_lookup(sw_context
->man
, view_type
, id
);
328 return PTR_ERR(view
);
330 ret
= vmw_view_res_val_add(sw_context
, view
);
331 vmw_resource_unreference(&view
);
337 * vmw_resource_context_res_add - Put resources previously bound to a context on
338 * the validation list
340 * @dev_priv: Pointer to a device private structure
341 * @sw_context: Pointer to a software context used for this command submission
342 * @ctx: Pointer to the context resource
344 * This function puts all resources that were previously bound to @ctx on
345 * the resource validation list. This is part of the context state reemission
347 static int vmw_resource_context_res_add(struct vmw_private
*dev_priv
,
348 struct vmw_sw_context
*sw_context
,
349 struct vmw_resource
*ctx
)
351 struct list_head
*binding_list
;
352 struct vmw_ctx_bindinfo
*entry
;
354 struct vmw_resource
*res
;
357 /* Add all cotables to the validation list. */
358 if (dev_priv
->has_dx
&& vmw_res_type(ctx
) == vmw_res_dx_context
) {
359 for (i
= 0; i
< SVGA_COTABLE_DX10_MAX
; ++i
) {
360 res
= vmw_context_cotable(ctx
, i
);
364 ret
= vmw_resource_val_add(sw_context
, res
, NULL
);
365 vmw_resource_unreference(&res
);
366 if (unlikely(ret
!= 0))
372 /* Add all resources bound to the context to the validation list */
373 mutex_lock(&dev_priv
->binding_mutex
);
374 binding_list
= vmw_context_binding_list(ctx
);
376 list_for_each_entry(entry
, binding_list
, ctx_list
) {
377 /* entry->res is not refcounted */
378 res
= vmw_resource_reference_unless_doomed(entry
->res
);
379 if (unlikely(res
== NULL
))
382 if (vmw_res_type(entry
->res
) == vmw_res_view
)
383 ret
= vmw_view_res_val_add(sw_context
, entry
->res
);
385 ret
= vmw_resource_val_add(sw_context
, entry
->res
,
387 vmw_resource_unreference(&res
);
388 if (unlikely(ret
!= 0))
392 if (dev_priv
->has_dx
&& vmw_res_type(ctx
) == vmw_res_dx_context
) {
393 struct vmw_dma_buffer
*dx_query_mob
;
395 dx_query_mob
= vmw_context_get_dx_query_mob(ctx
);
397 ret
= vmw_bo_to_validate_list(sw_context
,
402 mutex_unlock(&dev_priv
->binding_mutex
);
407 * vmw_resource_relocation_add - Add a relocation to the relocation list
409 * @list: Pointer to head of relocation list.
410 * @res: The resource.
411 * @offset: Offset into the command buffer currently being parsed where the
412 * id that needs fixup is located. Granularity is 4 bytes.
414 static int vmw_resource_relocation_add(struct list_head
*list
,
415 const struct vmw_resource
*res
,
416 unsigned long offset
)
418 struct vmw_resource_relocation
*rel
;
420 rel
= kmalloc(sizeof(*rel
), GFP_KERNEL
);
421 if (unlikely(rel
== NULL
)) {
422 DRM_ERROR("Failed to allocate a resource relocation.\n");
427 rel
->offset
= offset
;
428 list_add_tail(&rel
->head
, list
);
434 * vmw_resource_relocations_free - Free all relocations on a list
436 * @list: Pointer to the head of the relocation list.
438 static void vmw_resource_relocations_free(struct list_head
*list
)
440 struct vmw_resource_relocation
*rel
, *n
;
442 list_for_each_entry_safe(rel
, n
, list
, head
) {
443 list_del(&rel
->head
);
449 * vmw_resource_relocations_apply - Apply all relocations on a list
451 * @cb: Pointer to the start of the command buffer bein patch. This need
452 * not be the same buffer as the one being parsed when the relocation
453 * list was built, but the contents must be the same modulo the
455 * @list: Pointer to the head of the relocation list.
457 static void vmw_resource_relocations_apply(uint32_t *cb
,
458 struct list_head
*list
)
460 struct vmw_resource_relocation
*rel
;
462 list_for_each_entry(rel
, list
, head
) {
463 if (likely(rel
->res
!= NULL
))
464 cb
[rel
->offset
] = rel
->res
->id
;
466 cb
[rel
->offset
] = SVGA_3D_CMD_NOP
;
470 static int vmw_cmd_invalid(struct vmw_private
*dev_priv
,
471 struct vmw_sw_context
*sw_context
,
472 SVGA3dCmdHeader
*header
)
474 return capable(CAP_SYS_ADMIN
) ? : -EINVAL
;
477 static int vmw_cmd_ok(struct vmw_private
*dev_priv
,
478 struct vmw_sw_context
*sw_context
,
479 SVGA3dCmdHeader
*header
)
485 * vmw_bo_to_validate_list - add a bo to a validate list
487 * @sw_context: The software context used for this command submission batch.
488 * @bo: The buffer object to add.
489 * @validate_as_mob: Validate this buffer as a MOB.
490 * @p_val_node: If non-NULL Will be updated with the validate node number
493 * Returns -EINVAL if the limit of number of buffer objects per command
494 * submission is reached.
496 static int vmw_bo_to_validate_list(struct vmw_sw_context
*sw_context
,
497 struct vmw_dma_buffer
*vbo
,
498 bool validate_as_mob
,
499 uint32_t *p_val_node
)
502 struct vmw_validate_buffer
*vval_buf
;
503 struct ttm_validate_buffer
*val_buf
;
504 struct drm_hash_item
*hash
;
507 if (likely(drm_ht_find_item(&sw_context
->res_ht
, (unsigned long) vbo
,
509 vval_buf
= container_of(hash
, struct vmw_validate_buffer
,
511 if (unlikely(vval_buf
->validate_as_mob
!= validate_as_mob
)) {
512 DRM_ERROR("Inconsistent buffer usage.\n");
515 val_buf
= &vval_buf
->base
;
516 val_node
= vval_buf
- sw_context
->val_bufs
;
518 val_node
= sw_context
->cur_val_buf
;
519 if (unlikely(val_node
>= VMWGFX_MAX_VALIDATIONS
)) {
520 DRM_ERROR("Max number of DMA buffers per submission "
524 vval_buf
= &sw_context
->val_bufs
[val_node
];
525 vval_buf
->hash
.key
= (unsigned long) vbo
;
526 ret
= drm_ht_insert_item(&sw_context
->res_ht
, &vval_buf
->hash
);
527 if (unlikely(ret
!= 0)) {
528 DRM_ERROR("Failed to initialize a buffer validation "
532 ++sw_context
->cur_val_buf
;
533 val_buf
= &vval_buf
->base
;
534 val_buf
->bo
= ttm_bo_reference(&vbo
->base
);
535 val_buf
->shared
= false;
536 list_add_tail(&val_buf
->head
, &sw_context
->validate_nodes
);
537 vval_buf
->validate_as_mob
= validate_as_mob
;
541 *p_val_node
= val_node
;
547 * vmw_resources_reserve - Reserve all resources on the sw_context's
550 * @sw_context: Pointer to the software context.
552 * Note that since vmware's command submission currently is protected by
553 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
554 * since only a single thread at once will attempt this.
556 static int vmw_resources_reserve(struct vmw_sw_context
*sw_context
)
558 struct vmw_resource_val_node
*val
;
561 list_for_each_entry(val
, &sw_context
->resource_list
, head
) {
562 struct vmw_resource
*res
= val
->res
;
564 ret
= vmw_resource_reserve(res
, true, val
->no_buffer_needed
);
565 if (unlikely(ret
!= 0))
569 struct vmw_dma_buffer
*vbo
= res
->backup
;
571 ret
= vmw_bo_to_validate_list
573 vmw_resource_needs_backup(res
), NULL
);
575 if (unlikely(ret
!= 0))
580 if (sw_context
->dx_query_mob
) {
581 struct vmw_dma_buffer
*expected_dx_query_mob
;
583 expected_dx_query_mob
=
584 vmw_context_get_dx_query_mob(sw_context
->dx_query_ctx
);
585 if (expected_dx_query_mob
&&
586 expected_dx_query_mob
!= sw_context
->dx_query_mob
) {
595 * vmw_resources_validate - Validate all resources on the sw_context's
598 * @sw_context: Pointer to the software context.
600 * Before this function is called, all resource backup buffers must have
603 static int vmw_resources_validate(struct vmw_sw_context
*sw_context
)
605 struct vmw_resource_val_node
*val
;
608 list_for_each_entry(val
, &sw_context
->resource_list
, head
) {
609 struct vmw_resource
*res
= val
->res
;
610 struct vmw_dma_buffer
*backup
= res
->backup
;
612 ret
= vmw_resource_validate(res
);
613 if (unlikely(ret
!= 0)) {
614 if (ret
!= -ERESTARTSYS
)
615 DRM_ERROR("Failed to validate resource.\n");
619 /* Check if the resource switched backup buffer */
620 if (backup
&& res
->backup
&& (backup
!= res
->backup
)) {
621 struct vmw_dma_buffer
*vbo
= res
->backup
;
623 ret
= vmw_bo_to_validate_list
625 vmw_resource_needs_backup(res
), NULL
);
627 ttm_bo_unreserve(&vbo
->base
);
636 * vmw_cmd_res_reloc_add - Add a resource to a software context's
637 * relocation- and validation lists.
639 * @dev_priv: Pointer to a struct vmw_private identifying the device.
640 * @sw_context: Pointer to the software context.
641 * @id_loc: Pointer to where the id that needs translation is located.
642 * @res: Valid pointer to a struct vmw_resource.
643 * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
644 * used for this resource is returned here.
646 static int vmw_cmd_res_reloc_add(struct vmw_private
*dev_priv
,
647 struct vmw_sw_context
*sw_context
,
649 struct vmw_resource
*res
,
650 struct vmw_resource_val_node
**p_val
)
653 struct vmw_resource_val_node
*node
;
656 ret
= vmw_resource_relocation_add(&sw_context
->res_relocations
,
658 id_loc
- sw_context
->buf_start
);
659 if (unlikely(ret
!= 0))
662 ret
= vmw_resource_val_add(sw_context
, res
, &node
);
663 if (unlikely(ret
!= 0))
674 * vmw_cmd_res_check - Check that a resource is present and if so, put it
675 * on the resource validate list unless it's already there.
677 * @dev_priv: Pointer to a device private structure.
678 * @sw_context: Pointer to the software context.
679 * @res_type: Resource type.
680 * @converter: User-space visisble type specific information.
681 * @id_loc: Pointer to the location in the command buffer currently being
682 * parsed from where the user-space resource id handle is located.
683 * @p_val: Pointer to pointer to resource validalidation node. Populated
687 vmw_cmd_res_check(struct vmw_private
*dev_priv
,
688 struct vmw_sw_context
*sw_context
,
689 enum vmw_res_type res_type
,
690 const struct vmw_user_resource_conv
*converter
,
692 struct vmw_resource_val_node
**p_val
)
694 struct vmw_res_cache_entry
*rcache
=
695 &sw_context
->res_cache
[res_type
];
696 struct vmw_resource
*res
;
697 struct vmw_resource_val_node
*node
;
700 if (*id_loc
== SVGA3D_INVALID_ID
) {
703 if (res_type
== vmw_res_context
) {
704 DRM_ERROR("Illegal context invalid id.\n");
711 * Fastpath in case of repeated commands referencing the same
715 if (likely(rcache
->valid
&& *id_loc
== rcache
->handle
)) {
716 const struct vmw_resource
*res
= rcache
->res
;
718 rcache
->node
->first_usage
= false;
720 *p_val
= rcache
->node
;
722 return vmw_resource_relocation_add
723 (&sw_context
->res_relocations
, res
,
724 id_loc
- sw_context
->buf_start
);
727 ret
= vmw_user_resource_lookup_handle(dev_priv
,
728 sw_context
->fp
->tfile
,
732 if (unlikely(ret
!= 0)) {
733 DRM_ERROR("Could not find or use resource 0x%08x.\n",
739 rcache
->valid
= true;
741 rcache
->handle
= *id_loc
;
743 ret
= vmw_cmd_res_reloc_add(dev_priv
, sw_context
, id_loc
,
745 if (unlikely(ret
!= 0))
751 vmw_resource_unreference(&res
);
755 BUG_ON(sw_context
->error_resource
!= NULL
);
756 sw_context
->error_resource
= res
;
762 * vmw_rebind_dx_query - Rebind DX query associated with the context
764 * @ctx_res: context the query belongs to
766 * This function assumes binding_mutex is held.
768 static int vmw_rebind_all_dx_query(struct vmw_resource
*ctx_res
)
770 struct vmw_private
*dev_priv
= ctx_res
->dev_priv
;
771 struct vmw_dma_buffer
*dx_query_mob
;
773 SVGA3dCmdHeader header
;
774 SVGA3dCmdDXBindAllQuery body
;
778 dx_query_mob
= vmw_context_get_dx_query_mob(ctx_res
);
780 if (!dx_query_mob
|| dx_query_mob
->dx_query_ctx
)
783 cmd
= vmw_fifo_reserve_dx(dev_priv
, sizeof(*cmd
), ctx_res
->id
);
786 DRM_ERROR("Failed to rebind queries.\n");
790 cmd
->header
.id
= SVGA_3D_CMD_DX_BIND_ALL_QUERY
;
791 cmd
->header
.size
= sizeof(cmd
->body
);
792 cmd
->body
.cid
= ctx_res
->id
;
793 cmd
->body
.mobid
= dx_query_mob
->base
.mem
.start
;
794 vmw_fifo_commit(dev_priv
, sizeof(*cmd
));
796 vmw_context_bind_dx_query(ctx_res
, dx_query_mob
);
802 * vmw_rebind_contexts - Rebind all resources previously bound to
803 * referenced contexts.
805 * @sw_context: Pointer to the software context.
807 * Rebind context binding points that have been scrubbed because of eviction.
809 static int vmw_rebind_contexts(struct vmw_sw_context
*sw_context
)
811 struct vmw_resource_val_node
*val
;
814 list_for_each_entry(val
, &sw_context
->resource_list
, head
) {
815 if (unlikely(!val
->staged_bindings
))
818 ret
= vmw_binding_rebind_all
819 (vmw_context_binding_state(val
->res
));
820 if (unlikely(ret
!= 0)) {
821 if (ret
!= -ERESTARTSYS
)
822 DRM_ERROR("Failed to rebind context.\n");
826 ret
= vmw_rebind_all_dx_query(val
->res
);
835 * vmw_view_bindings_add - Add an array of view bindings to a context
836 * binding state tracker.
838 * @sw_context: The execbuf state used for this command.
839 * @view_type: View type for the bindings.
840 * @binding_type: Binding type for the bindings.
841 * @shader_slot: The shader slot to user for the bindings.
842 * @view_ids: Array of view ids to be bound.
843 * @num_views: Number of view ids in @view_ids.
844 * @first_slot: The binding slot to be used for the first view id in @view_ids.
846 static int vmw_view_bindings_add(struct vmw_sw_context
*sw_context
,
847 enum vmw_view_type view_type
,
848 enum vmw_ctx_binding_type binding_type
,
850 uint32 view_ids
[], u32 num_views
,
853 struct vmw_resource_val_node
*ctx_node
= sw_context
->dx_ctx_node
;
854 struct vmw_cmdbuf_res_manager
*man
;
859 DRM_ERROR("DX Context not set.\n");
863 man
= sw_context
->man
;
864 for (i
= 0; i
< num_views
; ++i
) {
865 struct vmw_ctx_bindinfo_view binding
;
866 struct vmw_resource
*view
= NULL
;
868 if (view_ids
[i
] != SVGA3D_INVALID_ID
) {
869 view
= vmw_view_lookup(man
, view_type
, view_ids
[i
]);
871 DRM_ERROR("View not found.\n");
872 return PTR_ERR(view
);
875 ret
= vmw_view_res_val_add(sw_context
, view
);
877 DRM_ERROR("Could not add view to "
878 "validation list.\n");
879 vmw_resource_unreference(&view
);
883 binding
.bi
.ctx
= ctx_node
->res
;
884 binding
.bi
.res
= view
;
885 binding
.bi
.bt
= binding_type
;
886 binding
.shader_slot
= shader_slot
;
887 binding
.slot
= first_slot
+ i
;
888 vmw_binding_add(ctx_node
->staged_bindings
, &binding
.bi
,
889 shader_slot
, binding
.slot
);
891 vmw_resource_unreference(&view
);
898 * vmw_cmd_cid_check - Check a command header for valid context information.
900 * @dev_priv: Pointer to a device private structure.
901 * @sw_context: Pointer to the software context.
902 * @header: A command header with an embedded user-space context handle.
904 * Convenience function: Call vmw_cmd_res_check with the user-space context
905 * handle embedded in @header.
907 static int vmw_cmd_cid_check(struct vmw_private
*dev_priv
,
908 struct vmw_sw_context
*sw_context
,
909 SVGA3dCmdHeader
*header
)
912 SVGA3dCmdHeader header
;
916 cmd
= container_of(header
, struct vmw_cid_cmd
, header
);
917 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_context
,
918 user_context_converter
, &cmd
->cid
, NULL
);
921 static int vmw_cmd_set_render_target_check(struct vmw_private
*dev_priv
,
922 struct vmw_sw_context
*sw_context
,
923 SVGA3dCmdHeader
*header
)
926 SVGA3dCmdHeader header
;
927 SVGA3dCmdSetRenderTarget body
;
929 struct vmw_resource_val_node
*ctx_node
;
930 struct vmw_resource_val_node
*res_node
;
933 cmd
= container_of(header
, struct vmw_sid_cmd
, header
);
935 if (cmd
->body
.type
>= SVGA3D_RT_MAX
) {
936 DRM_ERROR("Illegal render target type %u.\n",
937 (unsigned) cmd
->body
.type
);
941 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_context
,
942 user_context_converter
, &cmd
->body
.cid
,
944 if (unlikely(ret
!= 0))
947 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
948 user_surface_converter
,
949 &cmd
->body
.target
.sid
, &res_node
);
950 if (unlikely(ret
!= 0))
953 if (dev_priv
->has_mob
) {
954 struct vmw_ctx_bindinfo_view binding
;
956 binding
.bi
.ctx
= ctx_node
->res
;
957 binding
.bi
.res
= res_node
? res_node
->res
: NULL
;
958 binding
.bi
.bt
= vmw_ctx_binding_rt
;
959 binding
.slot
= cmd
->body
.type
;
960 vmw_binding_add(ctx_node
->staged_bindings
,
961 &binding
.bi
, 0, binding
.slot
);
967 static int vmw_cmd_surface_copy_check(struct vmw_private
*dev_priv
,
968 struct vmw_sw_context
*sw_context
,
969 SVGA3dCmdHeader
*header
)
972 SVGA3dCmdHeader header
;
973 SVGA3dCmdSurfaceCopy body
;
977 cmd
= container_of(header
, struct vmw_sid_cmd
, header
);
979 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
980 user_surface_converter
,
981 &cmd
->body
.src
.sid
, NULL
);
985 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
986 user_surface_converter
,
987 &cmd
->body
.dest
.sid
, NULL
);
990 static int vmw_cmd_buffer_copy_check(struct vmw_private
*dev_priv
,
991 struct vmw_sw_context
*sw_context
,
992 SVGA3dCmdHeader
*header
)
995 SVGA3dCmdHeader header
;
996 SVGA3dCmdDXBufferCopy body
;
1000 cmd
= container_of(header
, typeof(*cmd
), header
);
1001 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
1002 user_surface_converter
,
1003 &cmd
->body
.src
, NULL
);
1007 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
1008 user_surface_converter
,
1009 &cmd
->body
.dest
, NULL
);
1012 static int vmw_cmd_pred_copy_check(struct vmw_private
*dev_priv
,
1013 struct vmw_sw_context
*sw_context
,
1014 SVGA3dCmdHeader
*header
)
1017 SVGA3dCmdHeader header
;
1018 SVGA3dCmdDXPredCopyRegion body
;
1022 cmd
= container_of(header
, typeof(*cmd
), header
);
1023 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
1024 user_surface_converter
,
1025 &cmd
->body
.srcSid
, NULL
);
1029 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
1030 user_surface_converter
,
1031 &cmd
->body
.dstSid
, NULL
);
1034 static int vmw_cmd_stretch_blt_check(struct vmw_private
*dev_priv
,
1035 struct vmw_sw_context
*sw_context
,
1036 SVGA3dCmdHeader
*header
)
1038 struct vmw_sid_cmd
{
1039 SVGA3dCmdHeader header
;
1040 SVGA3dCmdSurfaceStretchBlt body
;
1044 cmd
= container_of(header
, struct vmw_sid_cmd
, header
);
1045 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
1046 user_surface_converter
,
1047 &cmd
->body
.src
.sid
, NULL
);
1048 if (unlikely(ret
!= 0))
1050 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
1051 user_surface_converter
,
1052 &cmd
->body
.dest
.sid
, NULL
);
1055 static int vmw_cmd_blt_surf_screen_check(struct vmw_private
*dev_priv
,
1056 struct vmw_sw_context
*sw_context
,
1057 SVGA3dCmdHeader
*header
)
1059 struct vmw_sid_cmd
{
1060 SVGA3dCmdHeader header
;
1061 SVGA3dCmdBlitSurfaceToScreen body
;
1064 cmd
= container_of(header
, struct vmw_sid_cmd
, header
);
1066 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
1067 user_surface_converter
,
1068 &cmd
->body
.srcImage
.sid
, NULL
);
1071 static int vmw_cmd_present_check(struct vmw_private
*dev_priv
,
1072 struct vmw_sw_context
*sw_context
,
1073 SVGA3dCmdHeader
*header
)
1075 struct vmw_sid_cmd
{
1076 SVGA3dCmdHeader header
;
1077 SVGA3dCmdPresent body
;
1081 cmd
= container_of(header
, struct vmw_sid_cmd
, header
);
1083 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
1084 user_surface_converter
, &cmd
->body
.sid
,
1089 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1091 * @dev_priv: The device private structure.
1092 * @new_query_bo: The new buffer holding query results.
1093 * @sw_context: The software context used for this command submission.
1095 * This function checks whether @new_query_bo is suitable for holding
1096 * query results, and if another buffer currently is pinned for query
1097 * results. If so, the function prepares the state of @sw_context for
1098 * switching pinned buffers after successful submission of the current
1101 static int vmw_query_bo_switch_prepare(struct vmw_private
*dev_priv
,
1102 struct vmw_dma_buffer
*new_query_bo
,
1103 struct vmw_sw_context
*sw_context
)
1105 struct vmw_res_cache_entry
*ctx_entry
=
1106 &sw_context
->res_cache
[vmw_res_context
];
1109 BUG_ON(!ctx_entry
->valid
);
1110 sw_context
->last_query_ctx
= ctx_entry
->res
;
1112 if (unlikely(new_query_bo
!= sw_context
->cur_query_bo
)) {
1114 if (unlikely(new_query_bo
->base
.num_pages
> 4)) {
1115 DRM_ERROR("Query buffer too large.\n");
1119 if (unlikely(sw_context
->cur_query_bo
!= NULL
)) {
1120 sw_context
->needs_post_query_barrier
= true;
1121 ret
= vmw_bo_to_validate_list(sw_context
,
1122 sw_context
->cur_query_bo
,
1123 dev_priv
->has_mob
, NULL
);
1124 if (unlikely(ret
!= 0))
1127 sw_context
->cur_query_bo
= new_query_bo
;
1129 ret
= vmw_bo_to_validate_list(sw_context
,
1130 dev_priv
->dummy_query_bo
,
1131 dev_priv
->has_mob
, NULL
);
1132 if (unlikely(ret
!= 0))
1142 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1144 * @dev_priv: The device private structure.
1145 * @sw_context: The software context used for this command submission batch.
1147 * This function will check if we're switching query buffers, and will then,
1148 * issue a dummy occlusion query wait used as a query barrier. When the fence
1149 * object following that query wait has signaled, we are sure that all
1150 * preceding queries have finished, and the old query buffer can be unpinned.
1151 * However, since both the new query buffer and the old one are fenced with
1152 * that fence, we can do an asynchronus unpin now, and be sure that the
1153 * old query buffer won't be moved until the fence has signaled.
1155 * As mentioned above, both the new - and old query buffers need to be fenced
1156 * using a sequence emitted *after* calling this function.
1158 static void vmw_query_bo_switch_commit(struct vmw_private
*dev_priv
,
1159 struct vmw_sw_context
*sw_context
)
1162 * The validate list should still hold references to all
1166 if (sw_context
->needs_post_query_barrier
) {
1167 struct vmw_res_cache_entry
*ctx_entry
=
1168 &sw_context
->res_cache
[vmw_res_context
];
1169 struct vmw_resource
*ctx
;
1172 BUG_ON(!ctx_entry
->valid
);
1173 ctx
= ctx_entry
->res
;
1175 ret
= vmw_fifo_emit_dummy_query(dev_priv
, ctx
->id
);
1177 if (unlikely(ret
!= 0))
1178 DRM_ERROR("Out of fifo space for dummy query.\n");
1181 if (dev_priv
->pinned_bo
!= sw_context
->cur_query_bo
) {
1182 if (dev_priv
->pinned_bo
) {
1183 vmw_bo_pin_reserved(dev_priv
->pinned_bo
, false);
1184 vmw_dmabuf_unreference(&dev_priv
->pinned_bo
);
1187 if (!sw_context
->needs_post_query_barrier
) {
1188 vmw_bo_pin_reserved(sw_context
->cur_query_bo
, true);
1191 * We pin also the dummy_query_bo buffer so that we
1192 * don't need to validate it when emitting
1193 * dummy queries in context destroy paths.
1196 if (!dev_priv
->dummy_query_bo_pinned
) {
1197 vmw_bo_pin_reserved(dev_priv
->dummy_query_bo
,
1199 dev_priv
->dummy_query_bo_pinned
= true;
1202 BUG_ON(sw_context
->last_query_ctx
== NULL
);
1203 dev_priv
->query_cid
= sw_context
->last_query_ctx
->id
;
1204 dev_priv
->query_cid_valid
= true;
1205 dev_priv
->pinned_bo
=
1206 vmw_dmabuf_reference(sw_context
->cur_query_bo
);
1212 * vmw_translate_mob_pointer - Prepare to translate a user-space buffer
1213 * handle to a MOB id.
1215 * @dev_priv: Pointer to a device private structure.
1216 * @sw_context: The software context used for this command batch validation.
1217 * @id: Pointer to the user-space handle to be translated.
1218 * @vmw_bo_p: Points to a location that, on successful return will carry
1219 * a reference-counted pointer to the DMA buffer identified by the
1220 * user-space handle in @id.
1222 * This function saves information needed to translate a user-space buffer
1223 * handle to a MOB id. The translation does not take place immediately, but
1224 * during a call to vmw_apply_relocations(). This function builds a relocation
1225 * list and a list of buffers to validate. The former needs to be freed using
1226 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
1227 * needs to be freed using vmw_clear_validations.
1229 static int vmw_translate_mob_ptr(struct vmw_private
*dev_priv
,
1230 struct vmw_sw_context
*sw_context
,
1232 struct vmw_dma_buffer
**vmw_bo_p
)
1234 struct vmw_dma_buffer
*vmw_bo
= NULL
;
1235 uint32_t handle
= *id
;
1236 struct vmw_relocation
*reloc
;
1239 ret
= vmw_user_dmabuf_lookup(sw_context
->fp
->tfile
, handle
, &vmw_bo
,
1241 if (unlikely(ret
!= 0)) {
1242 DRM_ERROR("Could not find or use MOB buffer.\n");
1247 if (unlikely(sw_context
->cur_reloc
>= VMWGFX_MAX_RELOCATIONS
)) {
1248 DRM_ERROR("Max number relocations per submission"
1254 reloc
= &sw_context
->relocs
[sw_context
->cur_reloc
++];
1255 reloc
->mob_loc
= id
;
1256 reloc
->location
= NULL
;
1258 ret
= vmw_bo_to_validate_list(sw_context
, vmw_bo
, true, &reloc
->index
);
1259 if (unlikely(ret
!= 0))
1266 vmw_dmabuf_unreference(&vmw_bo
);
1272 * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
1273 * handle to a valid SVGAGuestPtr
1275 * @dev_priv: Pointer to a device private structure.
1276 * @sw_context: The software context used for this command batch validation.
1277 * @ptr: Pointer to the user-space handle to be translated.
1278 * @vmw_bo_p: Points to a location that, on successful return will carry
1279 * a reference-counted pointer to the DMA buffer identified by the
1280 * user-space handle in @id.
1282 * This function saves information needed to translate a user-space buffer
1283 * handle to a valid SVGAGuestPtr. The translation does not take place
1284 * immediately, but during a call to vmw_apply_relocations().
1285 * This function builds a relocation list and a list of buffers to validate.
1286 * The former needs to be freed using either vmw_apply_relocations() or
1287 * vmw_free_relocations(). The latter needs to be freed using
1288 * vmw_clear_validations.
1290 static int vmw_translate_guest_ptr(struct vmw_private
*dev_priv
,
1291 struct vmw_sw_context
*sw_context
,
1293 struct vmw_dma_buffer
**vmw_bo_p
)
1295 struct vmw_dma_buffer
*vmw_bo
= NULL
;
1296 uint32_t handle
= ptr
->gmrId
;
1297 struct vmw_relocation
*reloc
;
1300 ret
= vmw_user_dmabuf_lookup(sw_context
->fp
->tfile
, handle
, &vmw_bo
,
1302 if (unlikely(ret
!= 0)) {
1303 DRM_ERROR("Could not find or use GMR region.\n");
1308 if (unlikely(sw_context
->cur_reloc
>= VMWGFX_MAX_RELOCATIONS
)) {
1309 DRM_ERROR("Max number relocations per submission"
1315 reloc
= &sw_context
->relocs
[sw_context
->cur_reloc
++];
1316 reloc
->location
= ptr
;
1318 ret
= vmw_bo_to_validate_list(sw_context
, vmw_bo
, false, &reloc
->index
);
1319 if (unlikely(ret
!= 0))
1326 vmw_dmabuf_unreference(&vmw_bo
);
1334 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
1336 * @dev_priv: Pointer to a device private struct.
1337 * @sw_context: The software context used for this command submission.
1338 * @header: Pointer to the command header in the command stream.
1340 * This function adds the new query into the query COTABLE
1342 static int vmw_cmd_dx_define_query(struct vmw_private
*dev_priv
,
1343 struct vmw_sw_context
*sw_context
,
1344 SVGA3dCmdHeader
*header
)
1346 struct vmw_dx_define_query_cmd
{
1347 SVGA3dCmdHeader header
;
1348 SVGA3dCmdDXDefineQuery q
;
1352 struct vmw_resource_val_node
*ctx_node
= sw_context
->dx_ctx_node
;
1353 struct vmw_resource
*cotable_res
;
1356 if (ctx_node
== NULL
) {
1357 DRM_ERROR("DX Context not set for query.\n");
1361 cmd
= container_of(header
, struct vmw_dx_define_query_cmd
, header
);
1363 if (cmd
->q
.type
< SVGA3D_QUERYTYPE_MIN
||
1364 cmd
->q
.type
>= SVGA3D_QUERYTYPE_MAX
)
1367 cotable_res
= vmw_context_cotable(ctx_node
->res
, SVGA_COTABLE_DXQUERY
);
1368 ret
= vmw_cotable_notify(cotable_res
, cmd
->q
.queryId
);
1369 vmw_resource_unreference(&cotable_res
);
1377 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
1379 * @dev_priv: Pointer to a device private struct.
1380 * @sw_context: The software context used for this command submission.
1381 * @header: Pointer to the command header in the command stream.
1383 * The query bind operation will eventually associate the query ID
1384 * with its backing MOB. In this function, we take the user mode
1385 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
1386 * kernel mode equivalent.
1388 static int vmw_cmd_dx_bind_query(struct vmw_private
*dev_priv
,
1389 struct vmw_sw_context
*sw_context
,
1390 SVGA3dCmdHeader
*header
)
1392 struct vmw_dx_bind_query_cmd
{
1393 SVGA3dCmdHeader header
;
1394 SVGA3dCmdDXBindQuery q
;
1397 struct vmw_dma_buffer
*vmw_bo
;
1401 cmd
= container_of(header
, struct vmw_dx_bind_query_cmd
, header
);
1404 * Look up the buffer pointed to by q.mobid, put it on the relocation
1405 * list so its kernel mode MOB ID can be filled in later
1407 ret
= vmw_translate_mob_ptr(dev_priv
, sw_context
, &cmd
->q
.mobid
,
1413 sw_context
->dx_query_mob
= vmw_bo
;
1414 sw_context
->dx_query_ctx
= sw_context
->dx_ctx_node
->res
;
1416 vmw_dmabuf_unreference(&vmw_bo
);
1424 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
1426 * @dev_priv: Pointer to a device private struct.
1427 * @sw_context: The software context used for this command submission.
1428 * @header: Pointer to the command header in the command stream.
1430 static int vmw_cmd_begin_gb_query(struct vmw_private
*dev_priv
,
1431 struct vmw_sw_context
*sw_context
,
1432 SVGA3dCmdHeader
*header
)
1434 struct vmw_begin_gb_query_cmd
{
1435 SVGA3dCmdHeader header
;
1436 SVGA3dCmdBeginGBQuery q
;
1439 cmd
= container_of(header
, struct vmw_begin_gb_query_cmd
,
1442 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_context
,
1443 user_context_converter
, &cmd
->q
.cid
,
1448 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
1450 * @dev_priv: Pointer to a device private struct.
1451 * @sw_context: The software context used for this command submission.
1452 * @header: Pointer to the command header in the command stream.
1454 static int vmw_cmd_begin_query(struct vmw_private
*dev_priv
,
1455 struct vmw_sw_context
*sw_context
,
1456 SVGA3dCmdHeader
*header
)
1458 struct vmw_begin_query_cmd
{
1459 SVGA3dCmdHeader header
;
1460 SVGA3dCmdBeginQuery q
;
1463 cmd
= container_of(header
, struct vmw_begin_query_cmd
,
1466 if (unlikely(dev_priv
->has_mob
)) {
1468 SVGA3dCmdHeader header
;
1469 SVGA3dCmdBeginGBQuery q
;
1472 BUG_ON(sizeof(gb_cmd
) != sizeof(*cmd
));
1474 gb_cmd
.header
.id
= SVGA_3D_CMD_BEGIN_GB_QUERY
;
1475 gb_cmd
.header
.size
= cmd
->header
.size
;
1476 gb_cmd
.q
.cid
= cmd
->q
.cid
;
1477 gb_cmd
.q
.type
= cmd
->q
.type
;
1479 memcpy(cmd
, &gb_cmd
, sizeof(*cmd
));
1480 return vmw_cmd_begin_gb_query(dev_priv
, sw_context
, header
);
1483 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_context
,
1484 user_context_converter
, &cmd
->q
.cid
,
1489 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
1491 * @dev_priv: Pointer to a device private struct.
1492 * @sw_context: The software context used for this command submission.
1493 * @header: Pointer to the command header in the command stream.
1495 static int vmw_cmd_end_gb_query(struct vmw_private
*dev_priv
,
1496 struct vmw_sw_context
*sw_context
,
1497 SVGA3dCmdHeader
*header
)
1499 struct vmw_dma_buffer
*vmw_bo
;
1500 struct vmw_query_cmd
{
1501 SVGA3dCmdHeader header
;
1502 SVGA3dCmdEndGBQuery q
;
1506 cmd
= container_of(header
, struct vmw_query_cmd
, header
);
1507 ret
= vmw_cmd_cid_check(dev_priv
, sw_context
, header
);
1508 if (unlikely(ret
!= 0))
1511 ret
= vmw_translate_mob_ptr(dev_priv
, sw_context
,
1514 if (unlikely(ret
!= 0))
1517 ret
= vmw_query_bo_switch_prepare(dev_priv
, vmw_bo
, sw_context
);
1519 vmw_dmabuf_unreference(&vmw_bo
);
1524 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
1526 * @dev_priv: Pointer to a device private struct.
1527 * @sw_context: The software context used for this command submission.
1528 * @header: Pointer to the command header in the command stream.
1530 static int vmw_cmd_end_query(struct vmw_private
*dev_priv
,
1531 struct vmw_sw_context
*sw_context
,
1532 SVGA3dCmdHeader
*header
)
1534 struct vmw_dma_buffer
*vmw_bo
;
1535 struct vmw_query_cmd
{
1536 SVGA3dCmdHeader header
;
1537 SVGA3dCmdEndQuery q
;
1541 cmd
= container_of(header
, struct vmw_query_cmd
, header
);
1542 if (dev_priv
->has_mob
) {
1544 SVGA3dCmdHeader header
;
1545 SVGA3dCmdEndGBQuery q
;
1548 BUG_ON(sizeof(gb_cmd
) != sizeof(*cmd
));
1550 gb_cmd
.header
.id
= SVGA_3D_CMD_END_GB_QUERY
;
1551 gb_cmd
.header
.size
= cmd
->header
.size
;
1552 gb_cmd
.q
.cid
= cmd
->q
.cid
;
1553 gb_cmd
.q
.type
= cmd
->q
.type
;
1554 gb_cmd
.q
.mobid
= cmd
->q
.guestResult
.gmrId
;
1555 gb_cmd
.q
.offset
= cmd
->q
.guestResult
.offset
;
1557 memcpy(cmd
, &gb_cmd
, sizeof(*cmd
));
1558 return vmw_cmd_end_gb_query(dev_priv
, sw_context
, header
);
1561 ret
= vmw_cmd_cid_check(dev_priv
, sw_context
, header
);
1562 if (unlikely(ret
!= 0))
1565 ret
= vmw_translate_guest_ptr(dev_priv
, sw_context
,
1566 &cmd
->q
.guestResult
,
1568 if (unlikely(ret
!= 0))
1571 ret
= vmw_query_bo_switch_prepare(dev_priv
, vmw_bo
, sw_context
);
1573 vmw_dmabuf_unreference(&vmw_bo
);
1578 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
1580 * @dev_priv: Pointer to a device private struct.
1581 * @sw_context: The software context used for this command submission.
1582 * @header: Pointer to the command header in the command stream.
1584 static int vmw_cmd_wait_gb_query(struct vmw_private
*dev_priv
,
1585 struct vmw_sw_context
*sw_context
,
1586 SVGA3dCmdHeader
*header
)
1588 struct vmw_dma_buffer
*vmw_bo
;
1589 struct vmw_query_cmd
{
1590 SVGA3dCmdHeader header
;
1591 SVGA3dCmdWaitForGBQuery q
;
1595 cmd
= container_of(header
, struct vmw_query_cmd
, header
);
1596 ret
= vmw_cmd_cid_check(dev_priv
, sw_context
, header
);
1597 if (unlikely(ret
!= 0))
1600 ret
= vmw_translate_mob_ptr(dev_priv
, sw_context
,
1603 if (unlikely(ret
!= 0))
1606 vmw_dmabuf_unreference(&vmw_bo
);
1611 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
1613 * @dev_priv: Pointer to a device private struct.
1614 * @sw_context: The software context used for this command submission.
1615 * @header: Pointer to the command header in the command stream.
1617 static int vmw_cmd_wait_query(struct vmw_private
*dev_priv
,
1618 struct vmw_sw_context
*sw_context
,
1619 SVGA3dCmdHeader
*header
)
1621 struct vmw_dma_buffer
*vmw_bo
;
1622 struct vmw_query_cmd
{
1623 SVGA3dCmdHeader header
;
1624 SVGA3dCmdWaitForQuery q
;
1628 cmd
= container_of(header
, struct vmw_query_cmd
, header
);
1629 if (dev_priv
->has_mob
) {
1631 SVGA3dCmdHeader header
;
1632 SVGA3dCmdWaitForGBQuery q
;
1635 BUG_ON(sizeof(gb_cmd
) != sizeof(*cmd
));
1637 gb_cmd
.header
.id
= SVGA_3D_CMD_WAIT_FOR_GB_QUERY
;
1638 gb_cmd
.header
.size
= cmd
->header
.size
;
1639 gb_cmd
.q
.cid
= cmd
->q
.cid
;
1640 gb_cmd
.q
.type
= cmd
->q
.type
;
1641 gb_cmd
.q
.mobid
= cmd
->q
.guestResult
.gmrId
;
1642 gb_cmd
.q
.offset
= cmd
->q
.guestResult
.offset
;
1644 memcpy(cmd
, &gb_cmd
, sizeof(*cmd
));
1645 return vmw_cmd_wait_gb_query(dev_priv
, sw_context
, header
);
1648 ret
= vmw_cmd_cid_check(dev_priv
, sw_context
, header
);
1649 if (unlikely(ret
!= 0))
1652 ret
= vmw_translate_guest_ptr(dev_priv
, sw_context
,
1653 &cmd
->q
.guestResult
,
1655 if (unlikely(ret
!= 0))
1658 vmw_dmabuf_unreference(&vmw_bo
);
1662 static int vmw_cmd_dma(struct vmw_private
*dev_priv
,
1663 struct vmw_sw_context
*sw_context
,
1664 SVGA3dCmdHeader
*header
)
1666 struct vmw_dma_buffer
*vmw_bo
= NULL
;
1667 struct vmw_surface
*srf
= NULL
;
1668 struct vmw_dma_cmd
{
1669 SVGA3dCmdHeader header
;
1670 SVGA3dCmdSurfaceDMA dma
;
1673 SVGA3dCmdSurfaceDMASuffix
*suffix
;
1676 cmd
= container_of(header
, struct vmw_dma_cmd
, header
);
1677 suffix
= (SVGA3dCmdSurfaceDMASuffix
*)((unsigned long) &cmd
->dma
+
1678 header
->size
- sizeof(*suffix
));
1680 /* Make sure device and verifier stays in sync. */
1681 if (unlikely(suffix
->suffixSize
!= sizeof(*suffix
))) {
1682 DRM_ERROR("Invalid DMA suffix size.\n");
1686 ret
= vmw_translate_guest_ptr(dev_priv
, sw_context
,
1687 &cmd
->dma
.guest
.ptr
,
1689 if (unlikely(ret
!= 0))
1692 /* Make sure DMA doesn't cross BO boundaries. */
1693 bo_size
= vmw_bo
->base
.num_pages
* PAGE_SIZE
;
1694 if (unlikely(cmd
->dma
.guest
.ptr
.offset
> bo_size
)) {
1695 DRM_ERROR("Invalid DMA offset.\n");
1699 bo_size
-= cmd
->dma
.guest
.ptr
.offset
;
1700 if (unlikely(suffix
->maximumOffset
> bo_size
))
1701 suffix
->maximumOffset
= bo_size
;
1703 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
1704 user_surface_converter
, &cmd
->dma
.host
.sid
,
1706 if (unlikely(ret
!= 0)) {
1707 if (unlikely(ret
!= -ERESTARTSYS
))
1708 DRM_ERROR("could not find surface for DMA.\n");
1709 goto out_no_surface
;
1712 srf
= vmw_res_to_srf(sw_context
->res_cache
[vmw_res_surface
].res
);
1714 vmw_kms_cursor_snoop(srf
, sw_context
->fp
->tfile
, &vmw_bo
->base
,
1718 vmw_dmabuf_unreference(&vmw_bo
);
1722 static int vmw_cmd_draw(struct vmw_private
*dev_priv
,
1723 struct vmw_sw_context
*sw_context
,
1724 SVGA3dCmdHeader
*header
)
1726 struct vmw_draw_cmd
{
1727 SVGA3dCmdHeader header
;
1728 SVGA3dCmdDrawPrimitives body
;
1730 SVGA3dVertexDecl
*decl
= (SVGA3dVertexDecl
*)(
1731 (unsigned long)header
+ sizeof(*cmd
));
1732 SVGA3dPrimitiveRange
*range
;
1737 ret
= vmw_cmd_cid_check(dev_priv
, sw_context
, header
);
1738 if (unlikely(ret
!= 0))
1741 cmd
= container_of(header
, struct vmw_draw_cmd
, header
);
1742 maxnum
= (header
->size
- sizeof(cmd
->body
)) / sizeof(*decl
);
1744 if (unlikely(cmd
->body
.numVertexDecls
> maxnum
)) {
1745 DRM_ERROR("Illegal number of vertex declarations.\n");
1749 for (i
= 0; i
< cmd
->body
.numVertexDecls
; ++i
, ++decl
) {
1750 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
1751 user_surface_converter
,
1752 &decl
->array
.surfaceId
, NULL
);
1753 if (unlikely(ret
!= 0))
1757 maxnum
= (header
->size
- sizeof(cmd
->body
) -
1758 cmd
->body
.numVertexDecls
* sizeof(*decl
)) / sizeof(*range
);
1759 if (unlikely(cmd
->body
.numRanges
> maxnum
)) {
1760 DRM_ERROR("Illegal number of index ranges.\n");
1764 range
= (SVGA3dPrimitiveRange
*) decl
;
1765 for (i
= 0; i
< cmd
->body
.numRanges
; ++i
, ++range
) {
1766 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
1767 user_surface_converter
,
1768 &range
->indexArray
.surfaceId
, NULL
);
1769 if (unlikely(ret
!= 0))
1776 static int vmw_cmd_tex_state(struct vmw_private
*dev_priv
,
1777 struct vmw_sw_context
*sw_context
,
1778 SVGA3dCmdHeader
*header
)
1780 struct vmw_tex_state_cmd
{
1781 SVGA3dCmdHeader header
;
1782 SVGA3dCmdSetTextureState state
;
1785 SVGA3dTextureState
*last_state
= (SVGA3dTextureState
*)
1786 ((unsigned long) header
+ header
->size
+ sizeof(header
));
1787 SVGA3dTextureState
*cur_state
= (SVGA3dTextureState
*)
1788 ((unsigned long) header
+ sizeof(struct vmw_tex_state_cmd
));
1789 struct vmw_resource_val_node
*ctx_node
;
1790 struct vmw_resource_val_node
*res_node
;
1793 cmd
= container_of(header
, struct vmw_tex_state_cmd
,
1796 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_context
,
1797 user_context_converter
, &cmd
->state
.cid
,
1799 if (unlikely(ret
!= 0))
1802 for (; cur_state
< last_state
; ++cur_state
) {
1803 if (likely(cur_state
->name
!= SVGA3D_TS_BIND_TEXTURE
))
1806 if (cur_state
->stage
>= SVGA3D_NUM_TEXTURE_UNITS
) {
1807 DRM_ERROR("Illegal texture/sampler unit %u.\n",
1808 (unsigned) cur_state
->stage
);
1812 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
1813 user_surface_converter
,
1814 &cur_state
->value
, &res_node
);
1815 if (unlikely(ret
!= 0))
1818 if (dev_priv
->has_mob
) {
1819 struct vmw_ctx_bindinfo_tex binding
;
1821 binding
.bi
.ctx
= ctx_node
->res
;
1822 binding
.bi
.res
= res_node
? res_node
->res
: NULL
;
1823 binding
.bi
.bt
= vmw_ctx_binding_tex
;
1824 binding
.texture_stage
= cur_state
->stage
;
1825 vmw_binding_add(ctx_node
->staged_bindings
, &binding
.bi
,
1826 0, binding
.texture_stage
);
1833 static int vmw_cmd_check_define_gmrfb(struct vmw_private
*dev_priv
,
1834 struct vmw_sw_context
*sw_context
,
1837 struct vmw_dma_buffer
*vmw_bo
;
1842 SVGAFifoCmdDefineGMRFB body
;
1845 ret
= vmw_translate_guest_ptr(dev_priv
, sw_context
,
1848 if (unlikely(ret
!= 0))
1851 vmw_dmabuf_unreference(&vmw_bo
);
1858 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1861 * @dev_priv: Pointer to a device private struct.
1862 * @sw_context: The software context being used for this batch.
1863 * @val_node: The validation node representing the resource.
1864 * @buf_id: Pointer to the user-space backup buffer handle in the command
1866 * @backup_offset: Offset of backup into MOB.
1868 * This function prepares for registering a switch of backup buffers
1869 * in the resource metadata just prior to unreserving. It's basically a wrapper
1870 * around vmw_cmd_res_switch_backup with a different interface.
1872 static int vmw_cmd_res_switch_backup(struct vmw_private
*dev_priv
,
1873 struct vmw_sw_context
*sw_context
,
1874 struct vmw_resource_val_node
*val_node
,
1876 unsigned long backup_offset
)
1878 struct vmw_dma_buffer
*dma_buf
;
1881 ret
= vmw_translate_mob_ptr(dev_priv
, sw_context
, buf_id
, &dma_buf
);
1885 val_node
->switching_backup
= true;
1886 if (val_node
->first_usage
)
1887 val_node
->no_buffer_needed
= true;
1889 vmw_dmabuf_unreference(&val_node
->new_backup
);
1890 val_node
->new_backup
= dma_buf
;
1891 val_node
->new_backup_offset
= backup_offset
;
1898 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1900 * @dev_priv: Pointer to a device private struct.
1901 * @sw_context: The software context being used for this batch.
1902 * @res_type: The resource type.
1903 * @converter: Information about user-space binding for this resource type.
1904 * @res_id: Pointer to the user-space resource handle in the command stream.
1905 * @buf_id: Pointer to the user-space backup buffer handle in the command
1907 * @backup_offset: Offset of backup into MOB.
1909 * This function prepares for registering a switch of backup buffers
1910 * in the resource metadata just prior to unreserving. It's basically a wrapper
1911 * around vmw_cmd_res_switch_backup with a different interface.
1913 static int vmw_cmd_switch_backup(struct vmw_private
*dev_priv
,
1914 struct vmw_sw_context
*sw_context
,
1915 enum vmw_res_type res_type
,
1916 const struct vmw_user_resource_conv
1920 unsigned long backup_offset
)
1922 struct vmw_resource_val_node
*val_node
;
1925 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, res_type
,
1926 converter
, res_id
, &val_node
);
1930 return vmw_cmd_res_switch_backup(dev_priv
, sw_context
, val_node
,
1931 buf_id
, backup_offset
);
1935 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1938 * @dev_priv: Pointer to a device private struct.
1939 * @sw_context: The software context being used for this batch.
1940 * @header: Pointer to the command header in the command stream.
1942 static int vmw_cmd_bind_gb_surface(struct vmw_private
*dev_priv
,
1943 struct vmw_sw_context
*sw_context
,
1944 SVGA3dCmdHeader
*header
)
1946 struct vmw_bind_gb_surface_cmd
{
1947 SVGA3dCmdHeader header
;
1948 SVGA3dCmdBindGBSurface body
;
1951 cmd
= container_of(header
, struct vmw_bind_gb_surface_cmd
, header
);
1953 return vmw_cmd_switch_backup(dev_priv
, sw_context
, vmw_res_surface
,
1954 user_surface_converter
,
1955 &cmd
->body
.sid
, &cmd
->body
.mobid
,
1960 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1963 * @dev_priv: Pointer to a device private struct.
1964 * @sw_context: The software context being used for this batch.
1965 * @header: Pointer to the command header in the command stream.
1967 static int vmw_cmd_update_gb_image(struct vmw_private
*dev_priv
,
1968 struct vmw_sw_context
*sw_context
,
1969 SVGA3dCmdHeader
*header
)
1971 struct vmw_gb_surface_cmd
{
1972 SVGA3dCmdHeader header
;
1973 SVGA3dCmdUpdateGBImage body
;
1976 cmd
	= container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}
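/*
 * The six helpers above share a single pattern: wrap the fixed-size command
 * body in a local struct so container_of() can recover it from the header,
 * then hand the embedded surface id to vmw_cmd_res_check() for lookup and
 * validation. Only the body type and the location of the sid member differ.
 */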
/**
 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_shader_define_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineShader body;
	} *cmd;
	int ret;
	size_t size;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_define_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv,
				    vmw_context_res_man(val->res),
				    cmd->body.shid, cmd + 1,
				    cmd->body.type, size,
				    &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL, &cmd->header.id -
					   sw_context->buf_start);
}
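/*
 * On guest-backed devices the legacy SHADER_DEFINE payload has already been
 * absorbed into a compat shader above, so the relocation added with a NULL
 * resource is expected to patch the original command id to a no-op
 * (SVGA_3D_CMD_NOP) when resource relocations are applied at submission time.
 */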
/**
 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_shader_destroy_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyShader body;
	} *cmd;
	int ret;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_destroy_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(val->res),
				cmd->body.shid, 0,
				&sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL, &cmd->header.id -
					   sw_context->buf_start);
}
/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *res = NULL;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
					cmd->body.shid,
					cmd->body.type);

		if (!IS_ERR(res)) {
			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
						    &cmd->body.shid, res,
						    &res_node);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (!res_node) {
		ret = vmw_cmd_res_check(dev_priv, sw_context,
					vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);

	return 0;
}
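/*
 * Shader bindings set up here are not sent to the device immediately; they
 * are recorded in the context node's staged_bindings and committed when the
 * context is unreserved, so that bindings can be re-emitted if the context
 * later needs rebinding.
 */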
/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}
/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}
/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSingleConstantBuffer body;
	} *cmd;
	struct vmw_resource_val_node *res_node = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
			  (unsigned) cmd->body.type,
			  (unsigned) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, binding.slot);

	return 0;
}
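/*
 * In the bounds check above, shader_slot is the zero-based shader stage
 * (body.type - SVGA3D_SHADERTYPE_MIN) and must stay below
 * SVGA3D_NUM_SHADERTYPE_DX10, while slot indexes the constant-buffer bind
 * point and must stay below SVGA3D_DX_MAX_CONSTBUFFERS.
 */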
/**
 * vmw_cmd_dx_set_shader_res - Validate an
 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShaderResources body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}
/**
 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShader body;
	} *cmd;
	struct vmw_resource *res = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			DRM_ERROR("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_resource_val_add(sw_context, res, NULL);
		if (ret)
			goto out_unref;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
out_unref:
	if (res)
		vmw_resource_unreference(&res);

	return ret;
}
/**
 * vmw_cmd_dx_set_vertex_buffers - Validates an
 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		DRM_ERROR("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->buf[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}
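/*
 * Example of the slot arithmetic above: with body.startBuffer == 2 and three
 * SVGA3dVertexBuffer entries following the command body, the bindings land in
 * vertex buffer slots 2, 3 and 4, after the slot range has been checked
 * against SVGA3D_DX_MAX_VERTEXBUFFERS before the loop runs.
 */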
/**
 * vmw_cmd_dx_set_index_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetIndexBuffer body;
	} *cmd;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = ((res_node) ? res_node->res : NULL);
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);

	return 0;
}
/**
 * vmw_cmd_dx_set_rendertargets - Validate an
 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetRenderTargets body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		DRM_ERROR("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
				    vmw_ctx_binding_ds, 0,
				    &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0,
				     (void *)&cmd[1], num_rt_view, 0);
}
/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearRenderTargetView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_rt,
				   cmd->body.renderTargetViewId);
}
/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearDepthStencilView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_ds,
				   cmd->body.depthStencilViewId);
}
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource_val_node *srf_node;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view_type = vmw_view_cmd_to_type(header->id);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->sid, &srf_node);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man,
			    ctx_node->res,
			    srf_node->res,
			    view_type,
			    cmd->defined_id,
			    header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}
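/*
 * View definition is a two-step bookkeeping operation: vmw_cotable_notify()
 * makes sure the context's view cotable can hold defined_id, and
 * vmw_view_add() registers the view with the software context's resource
 * manager so that later commands referencing the view id can be validated.
 */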
/**
 * vmw_cmd_dx_set_so_targets - Validate an
 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_so binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, num, ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		DRM_ERROR("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->targets[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.bi.bt = vmw_ctx_binding_so;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}
static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);

	return ret;
}
/**
 * vmw_cmd_dx_check_subresource - Validate an
 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->sid, NULL);
}
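/*
 * The BUILD_BUG_ONs above let one handler cover the readback, invalidate and
 * update subresource commands: as long as every body variant keeps its sid
 * member at the same offset, the anonymous union member 'sid' is a safe
 * alias for all three.
 */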
static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	return 0;
}
/**
 * vmw_cmd_dx_view_remove - validate a view remove command and
 * schedule the view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this
 * command batch, make sure it's validated (present in the device) so that
 * the remove command will not confuse the device.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_view_remove(sw_context->man,
			      cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res,
			      &view);
	if (ret || !view)
		return ret;

	/*
	 * Add view to the validate list iff it was not created using this
	 * command batch.
	 */
	return vmw_view_res_val_add(sw_context, view);
}
/**
 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	vmw_resource_unreference(&res);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}
/**
 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);
	if (ret)
		DRM_ERROR("Could not find shader to remove.\n");

	return ret;
}
/**
 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					user_context_converter,
					&cmd->body.cid, &ctx_node);
		if (ret)
			return ret;
	} else {
		ctx_node = sw_context->dx_ctx_node;
		if (!ctx_node) {
			DRM_ERROR("DX Context not set.\n");
			return -EINVAL;
		}
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
				cmd->body.shid, 0);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_resource_val_add(sw_context, res, &res_node);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		goto out_unref;
	}

	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
					&cmd->body.mobid,
					cmd->body.offsetInBytes);
out_unref:
	vmw_resource_unreference(&res);

	return ret;
}
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}
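/*
 * Non-3D (2D fifo) commands carry no SVGA3dCmdHeader, so the size consumed
 * from the batch is the 32-bit command id plus the fixed-size payload; e.g.
 * SVGA_CMD_UPDATE consumes sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate)
 * bytes, which is then checked against the remaining batch size.
 */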
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/*
	 * DX commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_ok, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
};
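/*
 * Reading the table: SVGA_3D_CMD_SURFACE_DMA, for example, maps to
 * vmw_cmd_dma with flags (true, false, false), i.e. allowed from the execbuf
 * ioctl but only on devices without guest-backed objects, while the GB and DX
 * entries flip the last flag and are only accepted when guest-backed objects
 * are present. Entries mapped to vmw_cmd_invalid are always rejected.
 */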
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);


	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;
out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}
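/*
 * Relocation patching depends on where the buffer ended up after validation:
 * VRAM placements get the SVGA_GMR_FRAMEBUFFER id plus a byte offset, GMR
 * placements are addressed by GMR id, and MOB placements write the MOB page
 * table id through the separate mob_loc pointer.
 */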
/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @sw_context: Pointer to the software context.
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
					  struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);

		if (val->staged_bindings) {
			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}

		kfree(val);
	}
}
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
int vmw_validate_single_buffer(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool validate_as_mob)
{
	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
						  base);
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
				       false);

	/**
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
			      false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/**
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
	return ret;
}
static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 true,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */

int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}
/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object so we wait for it immediately, and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
 * the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch
 * pointed to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands,
				   u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
					  sw_context->dx_ctx_node->res->id);
	else
		cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (!cmd) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		return -ENOMEM;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}
/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
 * the command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       id, false, header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}
/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel-space pointer to an already copied command batch,
 * if available. May be NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function will return the value
 * of @kernel_commands on function call. That value may be NULL. In that case,
 * the value of *@header will be set to NULL.
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands,
				u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource *res;
	int ret;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
					      handle, user_context_converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
			  (unsigned) handle);
		return ret;
	}

	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
	if (unlikely(ret != 0))
		goto out_err;

	sw_context->dx_ctx_node = ctx_node;
	sw_context->man = vmw_context_res_man(res);
out_err:
	vmw_resource_unreference(&res);
	return ret;
}
3892 int vmw_execbuf_process(struct drm_file
*file_priv
,
3893 struct vmw_private
*dev_priv
,
3894 void __user
*user_commands
,
3895 void *kernel_commands
,
3896 uint32_t command_size
,
3897 uint64_t throttle_us
,
3898 uint32_t dx_context_handle
,
3899 struct drm_vmw_fence_rep __user
*user_fence_rep
,
3900 struct vmw_fence_obj
**out_fence
)
3902 struct vmw_sw_context
*sw_context
= &dev_priv
->ctx
;
3903 struct vmw_fence_obj
*fence
= NULL
;
3904 struct vmw_resource
*error_resource
;
3905 struct list_head resource_list
;
3906 struct vmw_cmdbuf_header
*header
;
3907 struct ww_acquire_ctx ticket
;
3912 ret
= vmw_wait_lag(dev_priv
, &dev_priv
->fifo
.marker_queue
,
3919 kernel_commands
= vmw_execbuf_cmdbuf(dev_priv
, user_commands
,
3920 kernel_commands
, command_size
,
3922 if (IS_ERR(kernel_commands
))
3923 return PTR_ERR(kernel_commands
);
3925 ret
= mutex_lock_interruptible(&dev_priv
->cmdbuf_mutex
);
3928 goto out_free_header
;
3931 sw_context
->kernel
= false;
3932 if (kernel_commands
== NULL
) {
3933 ret
= vmw_resize_cmd_bounce(sw_context
, command_size
);
3934 if (unlikely(ret
!= 0))
3938 ret
= copy_from_user(sw_context
->cmd_bounce
,
3939 user_commands
, command_size
);
3941 if (unlikely(ret
!= 0)) {
3943 DRM_ERROR("Failed copying commands.\n");
3946 kernel_commands
= sw_context
->cmd_bounce
;
3948 sw_context
->kernel
= true;
3950 sw_context
->fp
= vmw_fpriv(file_priv
);
3951 sw_context
->cur_reloc
= 0;
3952 sw_context
->cur_val_buf
= 0;
3953 INIT_LIST_HEAD(&sw_context
->resource_list
);
3954 INIT_LIST_HEAD(&sw_context
->ctx_resource_list
);
3955 sw_context
->cur_query_bo
= dev_priv
->pinned_bo
;
3956 sw_context
->last_query_ctx
= NULL
;
3957 sw_context
->needs_post_query_barrier
= false;
3958 sw_context
->dx_ctx_node
= NULL
;
3959 sw_context
->dx_query_mob
= NULL
;
3960 sw_context
->dx_query_ctx
= NULL
;
3961 memset(sw_context
->res_cache
, 0, sizeof(sw_context
->res_cache
));
3962 INIT_LIST_HEAD(&sw_context
->validate_nodes
);
3963 INIT_LIST_HEAD(&sw_context
->res_relocations
);
3964 if (sw_context
->staged_bindings
)
3965 vmw_binding_state_reset(sw_context
->staged_bindings
);
3967 if (!sw_context
->res_ht_initialized
) {
3968 ret
= drm_ht_create(&sw_context
->res_ht
, VMW_RES_HT_ORDER
);
3969 if (unlikely(ret
!= 0))
3971 sw_context
->res_ht_initialized
= true;
3973 INIT_LIST_HEAD(&sw_context
->staged_cmd_res
);
3974 INIT_LIST_HEAD(&resource_list
);
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0)) {
		list_splice_init(&sw_context->ctx_resource_list,
				 &sw_context->resource_list);
		goto out_err_nores;
	}

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	/*
	 * Merge the resource lists before checking the return status
	 * from vmw_cmd_check_all so that all the open hashtabs will
	 * be handled properly even if vmw_cmd_check_all fails.
	 */
	list_splice_init(&sw_context->ctx_resource_list,
			 &sw_context->resource_list);

	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resources_unreserve(sw_context, false);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resources_unreserve(sw_context, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);

	return ret;
}
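
/*
 * Editor's illustrative sketch (not part of the driver build): a minimal
 * example of how an in-kernel caller might hand a prebuilt command buffer
 * to vmw_execbuf_process() and take ownership of the resulting fence via
 * the @out_fence argument. The function name and the way @cmds is built
 * are hypothetical; only the vmw_execbuf_process() signature above,
 * SVGA3D_INVALID_ID and vmw_fence_obj_unreference() are taken from this
 * file.
 */
#if 0
static int example_submit_kernel_commands(struct drm_file *file_priv,
					  struct vmw_private *dev_priv,
					  void *cmds, uint32_t size)
{
	struct vmw_fence_obj *fence = NULL;
	int ret;

	/* user_commands == NULL: @cmds is already in kernel memory. */
	ret = vmw_execbuf_process(file_priv, dev_priv,
				  NULL, cmds, size,
				  0,			/* no throttling */
				  SVGA3D_INVALID_ID,	/* no DX context */
				  NULL,			/* no user fence rep */
				  &fence);
	if (ret)
		return ret;

	/* ... wait on or hand off @fence here ... */
	if (fence)
		vmw_fence_obj_unreference(&fence);

	return 0;
}
#endif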

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * the case when the guest times out.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
	pinned_val.shared = false;
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
	query_val.shared = false;
	list_add_tail(&query_val.head, &validate_list);

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     false, NULL);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
	DRM_INFO("Dummy query bo pin count: %d\n",
		 dev_priv->dummy_query_bo->pin_count);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
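
/*
 * Editor's illustrative sketch (not part of the driver build): using
 * vmw_execbuf_release_pinned_bo() as a query barrier before destroying a
 * hardware context, as described in the comment above. The function name
 * and the context-destroy step are hypothetical placeholders.
 */
#if 0
static void example_context_teardown(struct vmw_private *dev_priv)
{
	/*
	 * Make sure all queries touching the currently pinned query bo
	 * have finished before the destroy command reaches the fifo.
	 */
	vmw_execbuf_release_pinned_bo(dev_priv);

	/* ... emit the hardware context destroy command here ... */
}
#endif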

int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;

	/*
	 * Extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg.version.
	 */

	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;

	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
		if (arg.pad64 != 0) {
			DRM_ERROR("Unused IOCTL data not set to zero.\n");
			return -EINVAL;
		}
		break;
	default:
		break;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		return ret;

	vmw_kms_cursor_post_execbuf(dev_priv);

	return 0;
}