/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
 * after validation is -1, the command is replaced with a NOP. Otherwise no
 * action.
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the
 * id that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset : 29;
	enum vmw_resource_relocation_type rel_type : 3;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource, to be switched on unreserve.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	u32 first_usage : 1;
	u32 switching_backup : 1;
	u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),		\
				       (_gb_disable), (_gb_enable), #_cmd}
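
/*
 * As an illustration of how the verifier dispatch table is built with
 * VMW_CMD_DEF (the table itself appears further down in the file), an
 * entry might look like:
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma, true, false,
 *		    false),
 *
 * i.e. entries are indexed by the command id relative to
 * SVGA_3D_CMD_BASE and record the verifier callback, the permission
 * flags and a printable command name.
 */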

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}
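
/*
 * vmw_ptr_diff() is what the relocation code below uses to record where
 * in the command buffer a resource id lives; for example,
 * vmw_ptr_diff(sw_context->buf_start, id_loc) yields the byte offset
 * that is stored in struct vmw_resource_relocation::offset.
 */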

/**
 * vmw_resources_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: pointer to the software context
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
				    bool backoff)
{
	struct vmw_resource_val_node *val;
	struct list_head *list = &sw_context->resource_list;

	if (sw_context->dx_query_mob && !backoff)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		bool switch_backup =
			(backoff) ? false : val->switching_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_binding_state_commit
					(vmw_context_binding_state(val->res),
					 val->staged_bindings);
			}

			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, switch_backup, val->new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource_val_node *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings =
			vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(node->staged_bindings);
			node->staged_bindings = NULL;
			goto out_err;
		}
	} else {
		node->staged_bindings = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	return 0;
out_err:
	return ret;
}
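
/*
 * A note on the allocation strategy above: the first context referenced
 * by a batch borrows the sw_context's preallocated binding-state
 * tracker (guarded by staged_bindings_inuse); any further context in
 * the same batch allocates a tracker of its own, which is freed again
 * in vmw_resources_unreserve().
 */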

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(!node)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	node->res = vmw_resource_reference(res);
	node->first_usage = true;
	if (unlikely(p_node != NULL))
		*p_node = node;

	if (!dev_priv->has_mob) {
		list_add_tail(&node->head, &sw_context->resource_list);
		return 0;
	}

	switch (vmw_res_type(res)) {
	case vmw_res_context:
	case vmw_res_dx_context:
		list_add(&node->head, &sw_context->ctx_resource_list);
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
		break;
	case vmw_res_cotable:
		list_add_tail(&node->head, &sw_context->ctx_resource_list);
		break;
	default:
		list_add_tail(&node->head, &sw_context->resource_list);
		break;
	}

	return ret;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise
	 * it may be swapped out when the view is validated.
	 */
	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
	if (ret)
		return ret;

	return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
			       enum vmw_view_type view_type, u32 id)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return PTR_ERR(view);

	ret = vmw_view_res_val_add(sw_context, view);
	vmw_resource_unreference(&view);

	return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_resource_val_add(sw_context, res, NULL);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		/* entry->res is not refcounted */
		res = vmw_resource_reference_unless_doomed(entry->res);
		if (unlikely(res == NULL))
			continue;

		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_resource_val_add(sw_context, entry->res,
						   NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_dma_buffer *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_bo_to_validate_list(sw_context,
						      dx_query_mob,
						      true, NULL);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(!rel)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}
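
/*
 * As an example of the conditional case: a relocation of type
 * vmw_res_rel_cond_nop recorded against a resource that never received
 * a hardware id (res->id == -1) rewrites the 32-bit word at cb + offset
 * to SVGA_3D_CMD_NOP, so the device silently skips the command instead
 * of operating on an invalid id.
 */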

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) vbo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(&vbo->base);
		val_buf->shared = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}
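
/*
 * Buffer objects are deduplicated through the same res_ht hash table
 * that caches resources; the key is the buffer-object pointer, so a BO
 * referenced by many commands in one batch still occupies a single
 * slot in sw_context->val_bufs.
 */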

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret = 0;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (sw_context->dx_query_mob) {
		struct vmw_dma_buffer *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *backup = res->backup;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);
			if (ret) {
				ttm_bo_unreserve(&vbo->base);
				return ret;
			}
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	*p_val = NULL;
	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		return ret;

	if (p_val)
		*p_val = node;

	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 vmw_ptr_diff(sw_context->buf_start, id_loc),
			 vmw_res_rel_normal);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}
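
/*
 * The res_cache consulted above exists because command streams tend to
 * reference the same resource many times in a row; hitting the cache
 * skips both the hash lookup and the user-space handle translation for
 * that common case.
 */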

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_dma_buffer *dx_query_mob;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindAllQuery body;
	} *cmd;

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

	if (cmd == NULL) {
		DRM_ERROR("Failed to rebind queries.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_binding_rebind_all
			(vmw_context_binding_state(val->res));
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->res);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_cmdbuf_res_manager *man;
	u32 i;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	man = sw_context->man;
	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_lookup(man, view_type, view_ids[i]);
			if (IS_ERR(view)) {
				DRM_ERROR("View not found.\n");
				return PTR_ERR(view);
			}

			ret = vmw_view_res_val_add(sw_context, view);
			if (ret) {
				DRM_ERROR("Could not add view to "
					  "validation list.\n");
				vmw_resource_unreference(&view);
				return ret;
			}
		}
		binding.bi.ctx = ctx_node->res;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				shader_slot, binding.slot);
		if (view)
			vmw_resource_unreference(&view);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		DRM_ERROR("Illegal render target type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = res_node ? res_node->res : NULL;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(ctx_node->staged_bindings,
				&binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBufferCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXPredCopyRegion body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_dma_buffer *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new - and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_dmabuf_reference(sw_context->cur_query_bo);
		}
	}
}
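
/*
 * Ordering note: the caller must emit the fence *after*
 * vmw_query_bo_switch_commit() so that both the old and the new pinned
 * query buffers are covered by it; that is what makes the asynchronous
 * unpin described above safe.
 */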

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}
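
/*
 * Only the buffer lookup happens at this point; the MOB id itself is
 * patched into the command stream later, when vmw_apply_relocations()
 * walks the relocation list that was extended above.
 */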

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_dx_define_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineQuery q;
	} *cmd;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *cotable_res;
	int ret;

	if (ctx_node == NULL) {
		DRM_ERROR("DX Context not set for query.\n");
		return -EINVAL;
	}

	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);

	if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
	vmw_resource_unreference(&cotable_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB. In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dx_bind_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindQuery q;
	} *cmd;
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
				    &vmw_bo);
	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}
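
/*
 * The pattern above - also used by the end- and wait-query paths below -
 * rewrites a legacy query command in place into its guest-backed
 * equivalent when the device has MOB support. The BUG_ON documents that
 * the two command layouts are the same size, which is what makes the
 * in-place memcpy safe.
 */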

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}
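
/*
 * Note that suffix->maximumOffset is clamped rather than the command
 * rejected: this keeps existing user space working while still
 * guaranteeing that the device never accesses memory past the end of
 * the guest buffer object.
 */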

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;

			binding.bi.ctx = ctx_node->res;
			binding.bi.res = res_node ? res_node->res : NULL;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
					0, binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @val_node: The validation node representing the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource_val_node *val_node,
				     uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_dma_buffer *dma_buf;
	int ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (ret)
		return ret;

	val_node->switching_backup = true;
	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}
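
/*
 * The backup-buffer switch recorded here is deferred: it is stored on
 * the validation node and only carried out by vmw_resource_unreserve(),
 * via vmw_resources_unreserve(), once the batch has been submitted
 * successfully.
 */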

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource_val_node *val_node;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
					 buf_id, backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}
2109 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
2112 * @dev_priv: Pointer to a device private struct.
2113 * @sw_context: The software context being used for this batch.
2114 * @header: Pointer to the command header in the command stream.
2116 static int vmw_cmd_invalidate_gb_image(struct vmw_private
*dev_priv
,
2117 struct vmw_sw_context
*sw_context
,
2118 SVGA3dCmdHeader
*header
)
2120 struct vmw_gb_surface_cmd
{
2121 SVGA3dCmdHeader header
;
2122 SVGA3dCmdInvalidateGBImage body
;
2125 cmd
= container_of(header
, struct vmw_gb_surface_cmd
, header
);
2127 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
2128 user_surface_converter
,
2129 &cmd
->body
.image
.sid
, NULL
);
2133 * vmw_cmd_invalidate_gb_surface - Validate an
2134 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
2136 * @dev_priv: Pointer to a device private struct.
2137 * @sw_context: The software context being used for this batch.
2138 * @header: Pointer to the command header in the command stream.
2140 static int vmw_cmd_invalidate_gb_surface(struct vmw_private
*dev_priv
,
2141 struct vmw_sw_context
*sw_context
,
2142 SVGA3dCmdHeader
*header
)
2144 struct vmw_gb_surface_cmd
{
2145 SVGA3dCmdHeader header
;
2146 SVGA3dCmdInvalidateGBSurface body
;
2149 cmd
= container_of(header
, struct vmw_gb_surface_cmd
, header
);
2151 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
2152 user_surface_converter
,
2153 &cmd
->body
.sid
, NULL
);

/**
 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_shader_define_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineShader body;
	} *cmd;
	int ret;
	size_t size;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_define_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv,
				    vmw_context_res_man(val->res),
				    cmd->body.shid, cmd + 1,
				    cmd->body.type, size,
				    &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}
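
/*
 * Note that when MOBs are in use, the shader bytecode following the
 * command body has already been transferred to a device shader by
 * vmw_compat_shader_add(), so the vmw_res_rel_nop relocation queued
 * above overwrites the legacy command with a NOP when relocations are
 * applied to the patched command stream.
 */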

/**
 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_shader_destroy_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyShader body;
	} *cmd;
	int ret;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_destroy_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(val->res),
				cmd->body.shid, 0,
				&sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *res = NULL;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
					cmd->body.shid,
					cmd->body.type);

		if (!IS_ERR(res)) {
			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
						    &cmd->body.shid, res,
						    &res_node);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (!res_node) {
		ret = vmw_cmd_res_check(dev_priv, sw_context,
					vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);

	return 0;
}
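
/*
 * The shader binding is only staged in ctx_node->staged_bindings here;
 * the binding tracker in vmwgfx_binding.c emits the corresponding
 * device state when the context is committed or scrubbed.
 */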

/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSingleConstantBuffer body;
	} *cmd;
	struct vmw_resource_val_node *res_node = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
			  (unsigned) cmd->body.type,
			  (unsigned) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate an
 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShaderResources body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}
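
/*
 * The view id count is implied by the command size: everything after
 * the fixed body is an array of SVGA3dShaderResourceViewId. The u64
 * arithmetic above keeps the range check safe against wrap-around of
 * startView + num_sr_view.
 */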

/**
 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShader body;
	} *cmd;
	struct vmw_resource *res = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			DRM_ERROR("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_resource_val_add(sw_context, res, NULL);
		if (ret)
			goto out_unref;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
out_unref:
	if (res)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validates an
 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		DRM_ERROR("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->buf[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}
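
/*
 * Each vertex buffer becomes an individual per-slot binding; the slot
 * number is offset by cmd->body.startBuffer so that it matches the
 * device binding point the command targets.
 */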

/**
 * vmw_cmd_dx_set_index_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetIndexBuffer body;
	} *cmd;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = ((res_node) ? res_node->res : NULL);
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate an
 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetRenderTargets body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		DRM_ERROR("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
				    vmw_ctx_binding_ds, 0,
				    &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0,
				     (void *)&cmd[1], num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearRenderTargetView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_rt,
				   cmd->body.renderTargetViewId);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearDepthStencilView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_ds,
				   cmd->body.depthStencilViewId);
}

static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource_val_node *srf_node;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->sid, &srf_node);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man,
			    ctx_node->res,
			    srf_node->res,
			    view_type,
			    cmd->defined_id,
			    header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}
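
/*
 * vmw_cotable_notify() grows the context's cotable to cover the new
 * view id before the define command reaches the device; the view itself
 * is then tracked as a command-buffer managed resource by
 * vmw_view_add().
 */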

/**
 * vmw_cmd_dx_set_so_targets - Validate an
 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_so binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, num, ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		DRM_ERROR("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->targets[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.bi.bt = vmw_ctx_binding_so;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate an
 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->sid, NULL);
}
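
/*
 * The BUILD_BUG_ONs above are what allow a single validator to serve
 * the readback, invalidate and update subresource variants: all three
 * bodies start with the same surface id, aliased through the anonymous
 * union member sid.
 */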

static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - validate a view remove command and
 * schedule the view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this
 * command batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_view_remove(sw_context->man,
			      cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res,
			      &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}

/**
 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	vmw_resource_unreference(&res);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);
	if (ret)
		DRM_ERROR("Could not find shader to remove.\n");

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					user_context_converter,
					&cmd->body.cid, &ctx_node);
		if (ret)
			return ret;
	} else {
		ctx_node = sw_context->dx_ctx_node;
		if (!ctx_node) {
			DRM_ERROR("DX Context not set.\n");
			return -EINVAL;
		}
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
				cmd->body.shid, 0);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_resource_val_add(sw_context, res, &res_node);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		goto out_unref;
	}

	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
					&cmd->body.mobid,
					cmd->body.offsetInBytes);
out_unref:
	vmw_resource_unreference(&res);

	return ret;
}
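
/*
 * A DX shader may be bound through an explicit context id or through
 * the command stream's current DX context; both paths converge on the
 * same backup (MOB) buffer switch for the shader resource.
 */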

/**
 * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXGenMips body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_sr,
				   cmd->body.shaderResourceViewId);
}

/**
 * vmw_cmd_dx_transfer_from_buffer -
 * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXTransferFromBuffer body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.destSid, NULL);
}

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}
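
/*
 * Table mapping each SVGA 3D command id to its validator. The three
 * booleans in each VMW_CMD_DEF entry are, in order: user_allow (the
 * command may appear in user-space submissions), gb_disable (the
 * command is disallowed when guest-backed objects are in use) and
 * gb_enable (the command requires guest-backed objects); they are
 * tested in vmw_cmd_check() below.
 */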

static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/*
	 * SM commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
};

bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}

static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;
out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
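
/*
 * vmw_cmd_check() is the per-command gate: it bounds-checks the header
 * against the remaining batch, rejects privileged commands from
 * user-space, filters by guest-backed capability and only then
 * dispatches to the per-command validator from vmw_cmd_entries.
 */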

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}
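
/*
 * Buffer relocations are resolved only after all buffers have reached
 * their final placement: VRAM-placed buffers are addressed as an offset
 * into the framebuffer GMR, while GMR and MOB placements patch in the
 * buffer's start page number directly.
 */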

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @sw_context: Pointer to the software context.
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
					  struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);

		if (val->staged_bindings) {
			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}

		kfree(val);
	}
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

int vmw_validate_single_buffer(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool validate_as_mob)
{
	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
						  base);
	struct ttm_operation_ctx ctx = { interruptible, true };
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 true,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
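
/*
 * The bounce buffer grows geometrically (about 1.5x per step, page
 * aligned) and is never shrunk, so a long-lived client converges on a
 * stable allocation. The old contents need not be preserved across the
 * vfree()/vmalloc() pair since the buffer is refilled from user-space
 * on every submission.
 */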

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */

int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: exported file descriptor for the fence. -1 if not used
 * @sync_file: Only used to clean up in case of an error in this function.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object so we wait for it immediately, and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle,
			    int32_t out_fence_fd,
			    struct sync_file *sync_file)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		if (sync_file)
			fput(sync_file->file);

		if (fence_rep.fd != -1) {
			put_unused_fd(fence_rep.fd);
			fence_rep.fd = -1;
		}

		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
 * the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch
 * pointed to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands,
				   u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
					  sw_context->dx_ctx_node->res->id);
	else
		cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (!cmd) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		return -ENOMEM;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
 * the command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       id, false, header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}
/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to a kernel copy of the commands, or NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function will return the value
 * of @kernel_commands on function call. That value may be NULL. In that case,
 * the value of *@header will be set to NULL.
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands,
				u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}
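
/*
 * A minimal sketch of the calling convention described above, matching the
 * use in vmw_execbuf_process() below:
 *
 *	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
 *					     kernel_commands, command_size,
 *					     &header);
 *	if (IS_ERR(kernel_commands))
 *		return PTR_ERR(kernel_commands);
 *
 * A non-error return with *header == NULL means command buffers are not
 * available; the caller falls back to the fifo path and, if the returned
 * pointer is also NULL, bounce-copies the user batch itself.
 */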
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource *res;
	int ret;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
					      handle, user_context_converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
			  (unsigned) handle);
		return ret;
	}

	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
	if (unlikely(ret != 0))
		goto out_err;

	sw_context->dx_ctx_node = ctx_node;
	sw_context->man = vmw_context_res_man(res);
out_err:
	vmw_resource_unreference(&res);
	return ret;
}
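
/*
 * Note that on success the looked-up DX context is cached in
 * @sw_context->dx_ctx_node and its resource manager in @sw_context->man, so
 * per-command validation can resolve ids without repeating the handle
 * lookup. The local reference from vmw_user_resource_lookup_handle() is
 * dropped before returning; the ref-counted validation node added by
 * vmw_resource_val_add() keeps the context alive for the rest of the batch.
 */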
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence,
			uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct vmw_cmdbuf_header *header;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			DRM_ERROR("Failed to get a fence file descriptor.\n");
			return out_fence_fd;
		}
	}

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);
		if (ret)
			goto out_free_fence_fd;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands)) {
		ret = PTR_ERR(kernel_commands);
		goto out_free_fence_fd;
	}

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	INIT_LIST_HEAD(&resource_list);
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0)) {
		list_splice_init(&sw_context->ctx_resource_list,
				 &sw_context->resource_list);
		goto out_err_nores;
	}

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	/*
	 * Merge the resource lists before checking the return status
	 * from vmw_cmd_check_all so that all the open hashtabs will
	 * be handled properly even if vmw_cmd_check_all fails.
	 */
	list_splice_init(&sw_context->ctx_resource_list,
			 &sw_context->resource_list);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
	 */
	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resources_unreserve(sw_context, false);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);

	/*
	 * If anything fails here, give up trying to export the fence
	 * and do a sync since the user mode will not be able to sync
	 * the fence itself. This ensures we are still functionally
	 * correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			DRM_ERROR("Unable to create sync file for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle,
				    out_fence_fd, sync_file);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resources_unreserve(sw_context, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}
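
/*
 * For reference, the error-unwind labels of vmw_execbuf_process() above fall
 * through in this order:
 *
 *	out_unlock_binding:	drop @dev_priv->binding_mutex
 *	out_err:		back off the ttm buffer reservations
 *	out_err_nores:		unreserve resources, free relocations and
 *				validations, release the pinned query bo
 *	out_unlock:		revert staged command-buffer resources, drop
 *				@dev_priv->cmdbuf_mutex, then unreference
 *				resources outside of the mutex
 *	out_free_header:	free the command buffer header, if any
 *	out_free_fence_fd:	put back the still-unused fence fd, if any
 */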
/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}
/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
	pinned_val.shared = false;
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
	query_val.shared = false;
	list_add_tail(&query_val.head, &validate_list);

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     false, NULL);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}
/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
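
/*
 * The wrapper above doubles as a minimal sketch of the locking required by
 * any caller of __vmw_execbuf_release_pinned_bo():
 *
 *	mutex_lock(&dev_priv->cmdbuf_mutex);
 *	__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 *	mutex_unlock(&dev_priv->cmdbuf_mutex);
 *
 * Passing a non-NULL @fence is valid only when the query cid is already
 * invalid (see the BUG_ON() in the function body), i.e. when a query
 * barrier has already been emitted and fenced.
 */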
int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};
	struct dma_fence *in_fence = NULL;

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;

	/*
	 * Extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg.version.
	 */
	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;

	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		break;
	}

	/* If a fence FD was imported from elsewhere, wait on it. */
	if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg.imported_fence_fd);

		if (!in_fence) {
			DRM_ERROR("Cannot get imported fence\n");
			return -EINVAL;
		}

		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
		if (ret)
			goto out;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL,
				  arg.flags);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);
	return ret;
}