// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12
/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions where, if
 * dx_ctx_node is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context)					\
({									\
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({	\
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
		__sw_context->dx_ctx_node;				\
	});								\
})
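
/*
 * Typical use in a DX command verifier (mirroring the verifiers later in
 * this file): the macro both fetches and error-checks the DX context node.
 *
 *	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 */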
#define VMW_DECLARE_CMD_VAR(__var, __type)				\
	struct {							\
		SVGA3dCmdHeader header;					\
		__type body;						\
	} __var
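
/*
 * Example (as used by the verifiers below): declare a typed view of a
 * command and recover it from the generic header.
 *
 *	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
 *
 *	cmd = container_of(header, typeof(*cmd), header);
 */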
/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};
/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};
/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
	enum vmw_resource_relocation_type rel_type:3;
};
/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};
/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled if guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};
#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),		\
				       (_gb_disable), (_gb_enable), #_cmd}
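
/*
 * VMW_CMD_DEF is meant for the command dispatch table (not part of this
 * excerpt); a sketch of one entry, assuming the table indexes commands
 * relative to SVGA_3D_CMD_BASE:
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false),
 */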
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}
/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}
/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}
/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private:
 * @sw_context: The command submission context
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}
/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}
/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}
/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
 * rcu-protected pointer to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		vmw_user_resource_noref_release();
		return 0;
	}

	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  dirty, (void **)&ctx_info,
					  &first_usage);
	vmw_user_resource_noref_release();
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret) {
			VMW_DEBUG_USER("Failed first usage context setup.\n");
			return ret;
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}
/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_res_cache_entry *rcache;
	enum vmw_res_type res_type = vmw_res_type(res);
	void *ptr;
	int ret;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
					  &ptr, NULL);
	if (ret)
		return ret;

	vmw_execbuf_rcache_update(rcache, res, ptr);

	return 0;
}
/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
					    vmw_view_dirtying(view));
	if (ret)
		return ret;

	return vmw_execbuf_res_noctx_val_add(sw_context, view,
					     VMW_RES_DIRTY_NONE);
}
/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
 * to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}
/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_SET);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_noctx_val_add
				(sw_context, entry->res,
				 vmw_binding_dirtying(entry->bt));
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}
/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context holding the relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}
/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}
/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need not
 * be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}
/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}
/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on
 * exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		res = vmw_user_resource_noref_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
		if (unlikely(ret != 0))
			return ret;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (ret)
		return ret;

	if (p_res)
		*p_res = res;

	return 0;
}
/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}
/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}
/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}
/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}
/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command submission
 * context's resource cache and hence the last resource of that type to be
 * processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}
static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}
/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If so,
 * the function prepares the state of @sw_context for switching pinned buffers
 * after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all preceding
 * queries have finished, and the old query buffer can be unpinned. However,
 * since both the new query buffer and the old one are fenced with that fence,
 * we can do an asynchronous unpin now, and be sure that the old query buffer
 * won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new - and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy queries
			 * in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}
/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate. The
 * former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the DMA buffer identified by the user-space
 * handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}
/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;

	return 0;
}
/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}
/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}
/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}
/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}
/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}
/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	u32 dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stays in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}
/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: The resource whose backup buffer is to be switched.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}
/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper around
 * vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}
/**
 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
}
/**
 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}
/**
 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}
/**
 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}
/**
 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}
/**
 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}
/**
 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}
/**
 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}
1946 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1948 * @dev_priv: Pointer to a device private struct.
1949 * @sw_context: The software context being used for this batch.
1950 * @header: Pointer to the command header in the command stream.
1952 static int vmw_cmd_shader_destroy(struct vmw_private
*dev_priv
,
1953 struct vmw_sw_context
*sw_context
,
1954 SVGA3dCmdHeader
*header
)
1956 VMW_DECLARE_CMD_VAR(*cmd
, SVGA3dCmdDestroyShader
);
1958 struct vmw_resource
*ctx
;
1960 cmd
= container_of(header
, typeof(*cmd
), header
);
1962 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_context
,
1963 VMW_RES_DIRTY_SET
, user_context_converter
,
1964 &cmd
->body
.cid
, &ctx
);
1965 if (unlikely(ret
!= 0))
1968 if (unlikely(!dev_priv
->has_mob
))
1971 ret
= vmw_shader_remove(vmw_context_res_man(ctx
), cmd
->body
.shid
,
1972 cmd
->body
.type
, &sw_context
->staged_cmd_res
);
1973 if (unlikely(ret
!= 0))
1976 return vmw_resource_relocation_add(sw_context
, NULL
,
1977 vmw_ptr_diff(sw_context
->buf_start
,
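/*
 * On MOB-capable devices the two legacy shader commands above are fully
 * consumed by the kernel: the shader bytecode is handed to the context's
 * compat shader manager, and the original command is later patched to a
 * NOP through the vmw_res_rel_nop relocation recorded at the command
 * header offset.
 */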
/**
 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		/*
		 * This is the compat shader path - Per device guest-backed
		 * shaders, but user-space thinks it's per context host-
		 * backed shaders.
		 */
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_NONE);
			if (unlikely(ret != 0))
				return ret;

			ret = vmw_resource_relocation_add
				(sw_context, res,
				 vmw_ptr_diff(sw_context->buf_start,
					      &cmd->body.shid),
				 vmw_res_rel_normal);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}
/**
 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
}
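/*
 * vmw_cmd_switch_backup() is the common helper for the *_BIND_GB_*
 * style commands: it validates the resource named by the command and then
 * switches its backup buffer to the MOB named by mobid, leaving a
 * relocation behind so the mob id can be patched once the buffer has been
 * placed.
 */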
/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;

	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= max_shader_num ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
			       (unsigned int) cmd->body.type,
			       (unsigned int) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);

	return 0;
}
/**
 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
		container_of(header, typeof(*cmd), header);
	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;

	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= max_allowed) {
		VMW_DEBUG_USER("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}
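/*
 * The (u64) casts in the range check above are deliberate: startView and
 * the view count are both taken from the untrusted command stream, so the
 * sum is evaluated in 64 bits to keep a u32 overflow from slipping past
 * the SVGA3D_DX_MAX_SRVIEWS bound. The same pattern is used by the other
 * DX "set" validators below.
 */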
/**
 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= max_allowed ||
	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
						    VMW_RES_DIRTY_NONE);
		if (ret)
			return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}
/**
 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cmd->buf[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = res;
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}
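/*
 * Like the other binding validators, nothing is emitted to the device
 * here: each vertex buffer is only recorded in ctx_node->staged, and the
 * staged binding state is committed to the context once the whole batch
 * has passed validation.
 */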
/**
 * vmw_cmd_dx_set_index_buffer - Validate SVGA_3D_CMD_DX_SET_INDEX_BUFFER
 * command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}
/**
 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
		container_of(header, typeof(*cmd), header);
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);
	int ret;

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
				    0, &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
				     num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
				  cmd->body.renderTargetViewId);

	return PTR_ERR_OR_ZERO(ret);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
				  cmd->body.depthStencilViewId);

	return PTR_ERR_OR_ZERO(ret);
}
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *srf;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have the
	 * same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (!ctx_node)
		return -EINVAL;

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
		VMW_DEBUG_USER("Invalid surface id.\n");
		return -EINVAL;
	}
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->sid, &srf);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
			    cmd->defined_id, header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}
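/*
 * View definition is a two step affair: vmw_cotable_notify() makes sure
 * the backing cotable can hold the new id, and vmw_view_add() keeps a
 * copy of the command in staged_cmd_res so the view bookkeeping can be
 * committed or unwound depending on whether the batch succeeds.
 */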
/**
 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_so_target binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, num, ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		VMW_DEBUG_USER("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_SET,
					user_surface_converter,
					&cmd->targets[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_so_target;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}
static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);

	return ret;
}
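/*
 * For these "state object" defines the kernel does not track a resource
 * of its own; it only reserves the id in the matching cotable via
 * vmw_cotable_notify() and lets the device-side definition pass through
 * unchanged.
 */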
/**
 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->sid, NULL);
}

static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);

	if (!ctx_node)
		return -EINVAL;

	return 0;
}
/**
 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the
 * view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this command
 * batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res, &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * invalid command stream errors.
	 */
	return vmw_resource_relocation_add(sw_context, view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}
/**
 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);

	return ret;
}
/**
 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					VMW_RES_DIRTY_SET,
					user_context_converter, &cmd->body.cid,
					&ctx);
		if (ret)
			return ret;
	} else {
		struct vmw_ctx_validation_info *ctx_node =
			VMW_GET_CTX_NODE(sw_context);

		if (!ctx_node)
			return -EINVAL;

		ctx = ctx_node->ctx;
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
					    VMW_RES_DIRTY_NONE);
	if (ret) {
		VMW_DEBUG_USER("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}
2750 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2752 * @dev_priv: Pointer to a device private struct.
2753 * @sw_context: The software context being used for this batch.
2754 * @header: Pointer to the command header in the command stream.
2756 static int vmw_cmd_dx_genmips(struct vmw_private
*dev_priv
,
2757 struct vmw_sw_context
*sw_context
,
2758 SVGA3dCmdHeader
*header
)
2760 VMW_DECLARE_CMD_VAR(*cmd
, SVGA3dCmdDXGenMips
) =
2761 container_of(header
, typeof(*cmd
), header
);
2762 struct vmw_resource
*ret
;
2764 ret
= vmw_view_id_val_add(sw_context
, vmw_view_sr
,
2765 cmd
->body
.shaderResourceViewId
);
2767 return PTR_ERR_OR_ZERO(ret
);
2771 * vmw_cmd_dx_transfer_from_buffer - Validate
2772 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2774 * @dev_priv: Pointer to a device private struct.
2775 * @sw_context: The software context being used for this batch.
2776 * @header: Pointer to the command header in the command stream.
2778 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private
*dev_priv
,
2779 struct vmw_sw_context
*sw_context
,
2780 SVGA3dCmdHeader
*header
)
2782 VMW_DECLARE_CMD_VAR(*cmd
, SVGA3dCmdDXTransferFromBuffer
) =
2783 container_of(header
, typeof(*cmd
), header
);
2786 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
2787 VMW_RES_DIRTY_NONE
, user_surface_converter
,
2788 &cmd
->body
.srcSid
, NULL
);
2792 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
2793 VMW_RES_DIRTY_SET
, user_surface_converter
,
2794 &cmd
->body
.destSid
, NULL
);
2798 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2800 * @dev_priv: Pointer to a device private struct.
2801 * @sw_context: The software context being used for this batch.
2802 * @header: Pointer to the command header in the command stream.
2804 static int vmw_cmd_intra_surface_copy(struct vmw_private
*dev_priv
,
2805 struct vmw_sw_context
*sw_context
,
2806 SVGA3dCmdHeader
*header
)
2808 VMW_DECLARE_CMD_VAR(*cmd
, SVGA3dCmdIntraSurfaceCopy
) =
2809 container_of(header
, typeof(*cmd
), header
);
2811 if (!(dev_priv
->capabilities2
& SVGA_CAP2_INTRA_SURFACE_COPY
))
2814 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
2815 VMW_RES_DIRTY_SET
, user_surface_converter
,
2816 &cmd
->body
.surface
.sid
, NULL
);
2819 static int vmw_cmd_sm5(struct vmw_private
*dev_priv
,
2820 struct vmw_sw_context
*sw_context
,
2821 SVGA3dCmdHeader
*header
)
2823 if (!has_sm5_context(dev_priv
))
2829 static int vmw_cmd_sm5_view_define(struct vmw_private
*dev_priv
,
2830 struct vmw_sw_context
*sw_context
,
2831 SVGA3dCmdHeader
*header
)
2833 if (!has_sm5_context(dev_priv
))
2836 return vmw_cmd_dx_view_define(dev_priv
, sw_context
, header
);
2839 static int vmw_cmd_sm5_view_remove(struct vmw_private
*dev_priv
,
2840 struct vmw_sw_context
*sw_context
,
2841 SVGA3dCmdHeader
*header
)
2843 if (!has_sm5_context(dev_priv
))
2846 return vmw_cmd_dx_view_remove(dev_priv
, sw_context
, header
);
2849 static int vmw_cmd_clear_uav_uint(struct vmw_private
*dev_priv
,
2850 struct vmw_sw_context
*sw_context
,
2851 SVGA3dCmdHeader
*header
)
2854 SVGA3dCmdHeader header
;
2855 SVGA3dCmdDXClearUAViewUint body
;
2856 } *cmd
= container_of(header
, typeof(*cmd
), header
);
2857 struct vmw_resource
*ret
;
2859 if (!has_sm5_context(dev_priv
))
2862 ret
= vmw_view_id_val_add(sw_context
, vmw_view_ua
,
2863 cmd
->body
.uaViewId
);
2865 return PTR_ERR_OR_ZERO(ret
);
2868 static int vmw_cmd_clear_uav_float(struct vmw_private
*dev_priv
,
2869 struct vmw_sw_context
*sw_context
,
2870 SVGA3dCmdHeader
*header
)
2873 SVGA3dCmdHeader header
;
2874 SVGA3dCmdDXClearUAViewFloat body
;
2875 } *cmd
= container_of(header
, typeof(*cmd
), header
);
2876 struct vmw_resource
*ret
;
2878 if (!has_sm5_context(dev_priv
))
2881 ret
= vmw_view_id_val_add(sw_context
, vmw_view_ua
,
2882 cmd
->body
.uaViewId
);
2884 return PTR_ERR_OR_ZERO(ret
);
2887 static int vmw_cmd_set_uav(struct vmw_private
*dev_priv
,
2888 struct vmw_sw_context
*sw_context
,
2889 SVGA3dCmdHeader
*header
)
2892 SVGA3dCmdHeader header
;
2893 SVGA3dCmdDXSetUAViews body
;
2894 } *cmd
= container_of(header
, typeof(*cmd
), header
);
2895 u32 num_uav
= (cmd
->header
.size
- sizeof(cmd
->body
)) /
2896 sizeof(SVGA3dUAViewId
);
2899 if (!has_sm5_context(dev_priv
))
2902 if (num_uav
> SVGA3D_MAX_UAVIEWS
) {
2903 VMW_DEBUG_USER("Invalid UAV binding.\n");
2907 ret
= vmw_view_bindings_add(sw_context
, vmw_view_ua
,
2908 vmw_ctx_binding_uav
, 0, (void *)&cmd
[1],
2913 vmw_binding_add_uav_index(sw_context
->dx_ctx_node
->staged
, 0,
2914 cmd
->body
.uavSpliceIndex
);
2919 static int vmw_cmd_set_cs_uav(struct vmw_private
*dev_priv
,
2920 struct vmw_sw_context
*sw_context
,
2921 SVGA3dCmdHeader
*header
)
2924 SVGA3dCmdHeader header
;
2925 SVGA3dCmdDXSetCSUAViews body
;
2926 } *cmd
= container_of(header
, typeof(*cmd
), header
);
2927 u32 num_uav
= (cmd
->header
.size
- sizeof(cmd
->body
)) /
2928 sizeof(SVGA3dUAViewId
);
2931 if (!has_sm5_context(dev_priv
))
2934 if (num_uav
> SVGA3D_MAX_UAVIEWS
) {
2935 VMW_DEBUG_USER("Invalid UAV binding.\n");
2939 ret
= vmw_view_bindings_add(sw_context
, vmw_view_ua
,
2940 vmw_ctx_binding_cs_uav
, 0, (void *)&cmd
[1],
2945 vmw_binding_add_uav_index(sw_context
->dx_ctx_node
->staged
, 1,
2946 cmd
->body
.startIndex
);
2951 static int vmw_cmd_dx_define_streamoutput(struct vmw_private
*dev_priv
,
2952 struct vmw_sw_context
*sw_context
,
2953 SVGA3dCmdHeader
*header
)
2955 struct vmw_ctx_validation_info
*ctx_node
= sw_context
->dx_ctx_node
;
2956 struct vmw_resource
*res
;
2958 SVGA3dCmdHeader header
;
2959 SVGA3dCmdDXDefineStreamOutputWithMob body
;
2960 } *cmd
= container_of(header
, typeof(*cmd
), header
);
2963 if (!has_sm5_context(dev_priv
))
2967 DRM_ERROR("DX Context not set.\n");
2971 res
= vmw_context_cotable(ctx_node
->ctx
, SVGA_COTABLE_STREAMOUTPUT
);
2972 ret
= vmw_cotable_notify(res
, cmd
->body
.soid
);
2976 return vmw_dx_streamoutput_add(sw_context
->man
, ctx_node
->ctx
,
2978 &sw_context
->staged_cmd_res
);
2981 static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private
*dev_priv
,
2982 struct vmw_sw_context
*sw_context
,
2983 SVGA3dCmdHeader
*header
)
2985 struct vmw_ctx_validation_info
*ctx_node
= sw_context
->dx_ctx_node
;
2986 struct vmw_resource
*res
;
2988 SVGA3dCmdHeader header
;
2989 SVGA3dCmdDXDestroyStreamOutput body
;
2990 } *cmd
= container_of(header
, typeof(*cmd
), header
);
2993 DRM_ERROR("DX Context not set.\n");
2998 * When device does not support SM5 then streamoutput with mob command is
2999 * not available to user-space. Simply return in this case.
3001 if (!has_sm5_context(dev_priv
))
3005 * With SM5 capable device if lookup fails then user-space probably used
3006 * old streamoutput define command. Return without an error.
3008 res
= vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node
->ctx
),
3013 return vmw_dx_streamoutput_remove(sw_context
->man
, cmd
->body
.soid
,
3014 &sw_context
->staged_cmd_res
);
3017 static int vmw_cmd_dx_bind_streamoutput(struct vmw_private
*dev_priv
,
3018 struct vmw_sw_context
*sw_context
,
3019 SVGA3dCmdHeader
*header
)
3021 struct vmw_ctx_validation_info
*ctx_node
= sw_context
->dx_ctx_node
;
3022 struct vmw_resource
*res
;
3024 SVGA3dCmdHeader header
;
3025 SVGA3dCmdDXBindStreamOutput body
;
3026 } *cmd
= container_of(header
, typeof(*cmd
), header
);
3029 if (!has_sm5_context(dev_priv
))
3033 DRM_ERROR("DX Context not set.\n");
3037 res
= vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node
->ctx
),
3040 DRM_ERROR("Could not find streamoutput to bind.\n");
3041 return PTR_ERR(res
);
3044 vmw_dx_streamoutput_set_size(res
, cmd
->body
.sizeInBytes
);
3046 ret
= vmw_execbuf_res_noctx_val_add(sw_context
, res
,
3047 VMW_RES_DIRTY_NONE
);
3049 DRM_ERROR("Error creating resource validation node.\n");
3053 return vmw_cmd_res_switch_backup(dev_priv
, sw_context
, res
,
3055 cmd
->body
.offsetInBytes
);
3058 static int vmw_cmd_dx_set_streamoutput(struct vmw_private
*dev_priv
,
3059 struct vmw_sw_context
*sw_context
,
3060 SVGA3dCmdHeader
*header
)
3062 struct vmw_ctx_validation_info
*ctx_node
= sw_context
->dx_ctx_node
;
3063 struct vmw_resource
*res
;
3064 struct vmw_ctx_bindinfo_so binding
;
3066 SVGA3dCmdHeader header
;
3067 SVGA3dCmdDXSetStreamOutput body
;
3068 } *cmd
= container_of(header
, typeof(*cmd
), header
);
3072 DRM_ERROR("DX Context not set.\n");
3076 if (cmd
->body
.soid
== SVGA3D_INVALID_ID
)
3080 * When device does not support SM5 then streamoutput with mob command is
3081 * not available to user-space. Simply return in this case.
3083 if (!has_sm5_context(dev_priv
))
3087 * With SM5 capable device if lookup fails then user-space probably used
3088 * old streamoutput define command. Return without an error.
3090 res
= vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node
->ctx
),
3096 ret
= vmw_execbuf_res_noctx_val_add(sw_context
, res
,
3097 VMW_RES_DIRTY_NONE
);
3099 DRM_ERROR("Error creating resource validation node.\n");
3103 binding
.bi
.ctx
= ctx_node
->ctx
;
3104 binding
.bi
.res
= res
;
3105 binding
.bi
.bt
= vmw_ctx_binding_so
;
3106 binding
.slot
= 0; /* Only one SO set to context at a time. */
3108 vmw_binding_add(sw_context
->dx_ctx_node
->staged
, &binding
.bi
, 0,
3114 static int vmw_cmd_indexed_instanced_indirect(struct vmw_private
*dev_priv
,
3115 struct vmw_sw_context
*sw_context
,
3116 SVGA3dCmdHeader
*header
)
3118 struct vmw_draw_indexed_instanced_indirect_cmd
{
3119 SVGA3dCmdHeader header
;
3120 SVGA3dCmdDXDrawIndexedInstancedIndirect body
;
3121 } *cmd
= container_of(header
, typeof(*cmd
), header
);
3123 if (!has_sm5_context(dev_priv
))
3126 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
3127 VMW_RES_DIRTY_NONE
, user_surface_converter
,
3128 &cmd
->body
.argsBufferSid
, NULL
);
3131 static int vmw_cmd_instanced_indirect(struct vmw_private
*dev_priv
,
3132 struct vmw_sw_context
*sw_context
,
3133 SVGA3dCmdHeader
*header
)
3135 struct vmw_draw_instanced_indirect_cmd
{
3136 SVGA3dCmdHeader header
;
3137 SVGA3dCmdDXDrawInstancedIndirect body
;
3138 } *cmd
= container_of(header
, typeof(*cmd
), header
);
3140 if (!has_sm5_context(dev_priv
))
3143 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
3144 VMW_RES_DIRTY_NONE
, user_surface_converter
,
3145 &cmd
->body
.argsBufferSid
, NULL
);
3148 static int vmw_cmd_dispatch_indirect(struct vmw_private
*dev_priv
,
3149 struct vmw_sw_context
*sw_context
,
3150 SVGA3dCmdHeader
*header
)
3152 struct vmw_dispatch_indirect_cmd
{
3153 SVGA3dCmdHeader header
;
3154 SVGA3dCmdDXDispatchIndirect body
;
3155 } *cmd
= container_of(header
, typeof(*cmd
), header
);
3157 if (!has_sm5_context(dev_priv
))
3160 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
3161 VMW_RES_DIRTY_NONE
, user_surface_converter
,
3162 &cmd
->body
.argsBufferSid
, NULL
);
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
			       cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}
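/*
 * Non-3D commands are a single 32-bit command id followed directly by the
 * corresponding SVGAFifoCmd* payload, so *size is derived from the fixed
 * struct size rather than from a command header. All of them are
 * restricted to kernel-internal submissions; SVGA_CMD_DEFINE_GMRFB
 * additionally gets its framebuffer argument checked.
 */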
3208 static const struct vmw_cmd_entry vmw_cmd_entries
[SVGA_3D_CMD_MAX
] = {
3209 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE
, &vmw_cmd_invalid
,
3210 false, false, false),
3211 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY
, &vmw_cmd_invalid
,
3212 false, false, false),
3213 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY
, &vmw_cmd_surface_copy_check
,
3214 true, false, false),
3215 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT
, &vmw_cmd_stretch_blt_check
,
3216 true, false, false),
3217 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA
, &vmw_cmd_dma
,
3218 true, false, false),
3219 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE
, &vmw_cmd_invalid
,
3220 false, false, false),
3221 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY
, &vmw_cmd_invalid
,
3222 false, false, false),
3223 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM
, &vmw_cmd_cid_check
,
3224 true, false, false),
3225 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE
, &vmw_cmd_cid_check
,
3226 true, false, false),
3227 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE
, &vmw_cmd_cid_check
,
3228 true, false, false),
3229 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET
,
3230 &vmw_cmd_set_render_target_check
, true, false, false),
3231 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE
, &vmw_cmd_tex_state
,
3232 true, false, false),
3233 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL
, &vmw_cmd_cid_check
,
3234 true, false, false),
3235 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA
, &vmw_cmd_cid_check
,
3236 true, false, false),
3237 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED
, &vmw_cmd_cid_check
,
3238 true, false, false),
3239 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT
, &vmw_cmd_cid_check
,
3240 true, false, false),
3241 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE
, &vmw_cmd_cid_check
,
3242 true, false, false),
3243 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR
, &vmw_cmd_cid_check
,
3244 true, false, false),
3245 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT
, &vmw_cmd_present_check
,
3246 false, false, false),
3247 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE
, &vmw_cmd_shader_define
,
3248 true, false, false),
3249 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY
, &vmw_cmd_shader_destroy
,
3250 true, false, false),
3251 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER
, &vmw_cmd_set_shader
,
3252 true, false, false),
3253 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST
, &vmw_cmd_set_shader_const
,
3254 true, false, false),
3255 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES
, &vmw_cmd_draw
,
3256 true, false, false),
3257 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT
, &vmw_cmd_cid_check
,
3258 true, false, false),
3259 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY
, &vmw_cmd_begin_query
,
3260 true, false, false),
3261 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY
, &vmw_cmd_end_query
,
3262 true, false, false),
3263 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY
, &vmw_cmd_wait_query
,
3264 true, false, false),
3265 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK
, &vmw_cmd_ok
,
3266 true, false, false),
3267 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN
,
3268 &vmw_cmd_blt_surf_screen_check
, false, false, false),
3269 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2
, &vmw_cmd_invalid
,
3270 false, false, false),
3271 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS
, &vmw_cmd_invalid
,
3272 false, false, false),
3273 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE
, &vmw_cmd_invalid
,
3274 false, false, false),
3275 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE
, &vmw_cmd_invalid
,
3276 false, false, false),
3277 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA
, &vmw_cmd_invalid
,
3278 false, false, false),
3279 VMW_CMD_DEF(SVGA_3D_CMD_DEAD1
, &vmw_cmd_invalid
,
3280 false, false, false),
3281 VMW_CMD_DEF(SVGA_3D_CMD_DEAD2
, &vmw_cmd_invalid
,
3282 false, false, false),
3283 VMW_CMD_DEF(SVGA_3D_CMD_DEAD12
, &vmw_cmd_invalid
, false, false, false),
3284 VMW_CMD_DEF(SVGA_3D_CMD_DEAD13
, &vmw_cmd_invalid
, false, false, false),
3285 VMW_CMD_DEF(SVGA_3D_CMD_DEAD14
, &vmw_cmd_invalid
, false, false, false),
3286 VMW_CMD_DEF(SVGA_3D_CMD_DEAD15
, &vmw_cmd_invalid
, false, false, false),
3287 VMW_CMD_DEF(SVGA_3D_CMD_DEAD16
, &vmw_cmd_invalid
, false, false, false),
3288 VMW_CMD_DEF(SVGA_3D_CMD_DEAD17
, &vmw_cmd_invalid
, false, false, false),
3289 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE
, &vmw_cmd_invalid
,
3290 false, false, true),
3291 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE
, &vmw_cmd_invalid
,
3292 false, false, true),
3293 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB
, &vmw_cmd_invalid
,
3294 false, false, true),
3295 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB
, &vmw_cmd_invalid
,
3296 false, false, true),
3297 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64
, &vmw_cmd_invalid
,
3298 false, false, true),
3299 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING
, &vmw_cmd_invalid
,
3300 false, false, true),
3301 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE
, &vmw_cmd_invalid
,
3302 false, false, true),
3303 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE
, &vmw_cmd_invalid
,
3304 false, false, true),
3305 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE
, &vmw_cmd_bind_gb_surface
,
3307 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE
, &vmw_cmd_invalid
,
3308 false, false, true),
3309 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE
, &vmw_cmd_update_gb_image
,
3311 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE
,
3312 &vmw_cmd_update_gb_surface
, true, false, true),
3313 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE
,
3314 &vmw_cmd_readback_gb_image
, true, false, true),
3315 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE
,
3316 &vmw_cmd_readback_gb_surface
, true, false, true),
3317 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE
,
3318 &vmw_cmd_invalidate_gb_image
, true, false, true),
3319 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE
,
3320 &vmw_cmd_invalidate_gb_surface
, true, false, true),
3321 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT
, &vmw_cmd_invalid
,
3322 false, false, true),
3323 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT
, &vmw_cmd_invalid
,
3324 false, false, true),
3325 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT
, &vmw_cmd_invalid
,
3326 false, false, true),
3327 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT
, &vmw_cmd_invalid
,
3328 false, false, true),
3329 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT
, &vmw_cmd_invalid
,
3330 false, false, true),
3331 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER
, &vmw_cmd_invalid
,
3332 false, false, true),
3333 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER
, &vmw_cmd_bind_gb_shader
,
3335 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER
, &vmw_cmd_invalid
,
3336 false, false, true),
3337 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64
, &vmw_cmd_invalid
,
3338 false, false, false),
3339 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY
, &vmw_cmd_begin_gb_query
,
3341 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY
, &vmw_cmd_end_gb_query
,
3343 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY
, &vmw_cmd_wait_gb_query
,
3345 VMW_CMD_DEF(SVGA_3D_CMD_NOP
, &vmw_cmd_ok
,
3347 VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR
, &vmw_cmd_ok
,
3349 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART
, &vmw_cmd_invalid
,
3350 false, false, true),
3351 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART
, &vmw_cmd_invalid
,
3352 false, false, true),
3353 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART
, &vmw_cmd_invalid
,
3354 false, false, true),
3355 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE
, &vmw_cmd_invalid
,
3356 false, false, true),
3357 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET
, &vmw_cmd_invalid
,
3358 false, false, true),
3359 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET
, &vmw_cmd_invalid
,
3360 false, false, true),
3361 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET
, &vmw_cmd_invalid
,
3362 false, false, true),
3363 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET
, &vmw_cmd_invalid
,
3364 false, false, true),
3365 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL
, &vmw_cmd_invalid
,
3366 false, false, true),
3367 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL
, &vmw_cmd_invalid
,
3368 false, false, true),
3369 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE
, &vmw_cmd_cid_check
,
3371 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA
, &vmw_cmd_invalid
,
3372 false, false, true),
3373 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH
, &vmw_cmd_invalid
,
3374 false, false, true),
3375 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE
, &vmw_cmd_invalid
,
3376 false, false, true),
3377 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2
, &vmw_cmd_invalid
,
3378 false, false, true),
3381 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT
, &vmw_cmd_invalid
,
3382 false, false, true),
3383 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT
, &vmw_cmd_invalid
,
3384 false, false, true),
3385 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT
, &vmw_cmd_invalid
,
3386 false, false, true),
3387 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT
, &vmw_cmd_invalid
,
3388 false, false, true),
3389 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT
, &vmw_cmd_invalid
,
3390 false, false, true),
3391 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER
,
3392 &vmw_cmd_dx_set_single_constant_buffer
, true, false, true),
3393 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
,
3394 &vmw_cmd_dx_set_shader_res
, true, false, true),
3395 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER
, &vmw_cmd_dx_set_shader
,
3397 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS
, &vmw_cmd_dx_cid_check
,
3399 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW
, &vmw_cmd_dx_cid_check
,
3401 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED
, &vmw_cmd_dx_cid_check
,
3403 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED
, &vmw_cmd_dx_cid_check
,
3405 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED
,
3406 &vmw_cmd_dx_cid_check
, true, false, true),
3407 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO
, &vmw_cmd_dx_cid_check
,
3409 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
,
3410 &vmw_cmd_dx_set_vertex_buffers
, true, false, true),
3411 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER
,
3412 &vmw_cmd_dx_set_index_buffer
, true, false, true),
3413 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS
,
3414 &vmw_cmd_dx_set_rendertargets
, true, false, true),
3415 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE
, &vmw_cmd_dx_cid_check
,
3417 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE
,
3418 &vmw_cmd_dx_cid_check
, true, false, true),
3419 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE
,
3420 &vmw_cmd_dx_cid_check
, true, false, true),
3421 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY
, &vmw_cmd_dx_define_query
,
3423 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY
, &vmw_cmd_dx_cid_check
,
3425 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY
, &vmw_cmd_dx_bind_query
,
3427 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET
,
3428 &vmw_cmd_dx_cid_check
, true, false, true),
3429 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY
, &vmw_cmd_dx_cid_check
,
3431 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY
, &vmw_cmd_dx_cid_check
,
3433 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY
, &vmw_cmd_invalid
,
3435 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION
, &vmw_cmd_dx_cid_check
,
3437 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS
, &vmw_cmd_dx_cid_check
,
3439 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS
, &vmw_cmd_dx_cid_check
,
3441 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW
,
3442 &vmw_cmd_dx_clear_rendertarget_view
, true, false, true),
3443 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW
,
3444 &vmw_cmd_dx_clear_depthstencil_view
, true, false, true),
3445 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY
, &vmw_cmd_invalid
,
3447 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS
, &vmw_cmd_dx_genmips
,
3449 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE
,
3450 &vmw_cmd_dx_check_subresource
, true, false, true),
3451 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE
,
3452 &vmw_cmd_dx_check_subresource
, true, false, true),
3453 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE
,
3454 &vmw_cmd_dx_check_subresource
, true, false, true),
3455 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW
,
3456 &vmw_cmd_dx_view_define
, true, false, true),
3457 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW
,
3458 &vmw_cmd_dx_view_remove
, true, false, true),
3459 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW
,
3460 &vmw_cmd_dx_view_define
, true, false, true),
3461 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW
,
3462 &vmw_cmd_dx_view_remove
, true, false, true),
3463 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW
,
3464 &vmw_cmd_dx_view_define
, true, false, true),
3465 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW
,
3466 &vmw_cmd_dx_view_remove
, true, false, true),
3467 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT
,
3468 &vmw_cmd_dx_so_define
, true, false, true),
3469 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT
,
3470 &vmw_cmd_dx_cid_check
, true, false, true),
3471 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE
,
3472 &vmw_cmd_dx_so_define
, true, false, true),
3473 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE
,
3474 &vmw_cmd_dx_cid_check
, true, false, true),
3475 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE
,
3476 &vmw_cmd_dx_so_define
, true, false, true),
3477 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE
,
3478 &vmw_cmd_dx_cid_check
, true, false, true),
3479 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE
,
3480 &vmw_cmd_dx_so_define
, true, false, true),
3481 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE
,
3482 &vmw_cmd_dx_cid_check
, true, false, true),
3483 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE
,
3484 &vmw_cmd_dx_so_define
, true, false, true),
3485 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE
,
3486 &vmw_cmd_dx_cid_check
, true, false, true),
3487 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER
,
3488 &vmw_cmd_dx_define_shader
, true, false, true),
3489 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER
,
3490 &vmw_cmd_dx_destroy_shader
, true, false, true),
3491 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER
,
3492 &vmw_cmd_dx_bind_shader
, true, false, true),
3493 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT
,
3494 &vmw_cmd_dx_so_define
, true, false, true),
3495 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT
,
3496 &vmw_cmd_dx_destroy_streamoutput
, true, false, true),
3497 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT
,
3498 &vmw_cmd_dx_set_streamoutput
, true, false, true),
3499 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS
,
3500 &vmw_cmd_dx_set_so_targets
, true, false, true),
3501 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT
,
3502 &vmw_cmd_dx_cid_check
, true, false, true),
3503 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY
,
3504 &vmw_cmd_dx_cid_check
, true, false, true),
3505 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY
,
3506 &vmw_cmd_buffer_copy_check
, true, false, true),
3507 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION
,
3508 &vmw_cmd_pred_copy_check
, true, false, true),
3509 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER
,
3510 &vmw_cmd_dx_transfer_from_buffer
,
3512 VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY
, &vmw_cmd_intra_surface_copy
,
3518 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW
, &vmw_cmd_sm5_view_define
,
3520 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW
, &vmw_cmd_sm5_view_remove
,
3522 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT
, &vmw_cmd_clear_uav_uint
,
3524 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT
,
3525 &vmw_cmd_clear_uav_float
, true, false, true),
3526 VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT
, &vmw_cmd_invalid
, true,
3528 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS
, &vmw_cmd_set_uav
, true, false,
3530 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT
,
3531 &vmw_cmd_indexed_instanced_indirect
, true, false, true),
3532 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT
,
3533 &vmw_cmd_instanced_indirect
, true, false, true),
3534 VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH
, &vmw_cmd_sm5
, true, false, true),
3535 VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT
,
3536 &vmw_cmd_dispatch_indirect
, true, false, true),
3537 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS
, &vmw_cmd_set_cs_uav
, true,
3539 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2
,
3540 &vmw_cmd_sm5_view_define
, true, false, true),
3541 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB
,
3542 &vmw_cmd_dx_define_streamoutput
, true, false, true),
3543 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT
,
3544 &vmw_cmd_dx_bind_streamoutput
, true, false, true),
3547 bool vmw_cmd_describe(const void *buf
, u32
*size
, char const **cmd
)
3549 u32 cmd_id
= ((u32
*) buf
)[0];
3551 if (cmd_id
>= SVGA_CMD_MAX
) {
3552 SVGA3dCmdHeader
*header
= (SVGA3dCmdHeader
*) buf
;
3553 const struct vmw_cmd_entry
*entry
;
3555 *size
= header
->size
+ sizeof(SVGA3dCmdHeader
);
3556 cmd_id
= header
->id
;
3557 if (cmd_id
>= SVGA_3D_CMD_MAX
)
3560 cmd_id
-= SVGA_3D_CMD_BASE
;
3561 entry
= &vmw_cmd_entries
[cmd_id
];
3562 *cmd
= entry
->cmd_name
;
3567 case SVGA_CMD_UPDATE
:
3568 *cmd
= "SVGA_CMD_UPDATE";
3569 *size
= sizeof(u32
) + sizeof(SVGAFifoCmdUpdate
);
3571 case SVGA_CMD_DEFINE_GMRFB
:
3572 *cmd
= "SVGA_CMD_DEFINE_GMRFB";
3573 *size
= sizeof(u32
) + sizeof(SVGAFifoCmdDefineGMRFB
);
3575 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN
:
3576 *cmd
= "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3577 *size
= sizeof(u32
) + sizeof(SVGAFifoCmdBlitGMRFBToScreen
);
3579 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB
:
3580 *cmd
= "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3581 *size
= sizeof(u32
) + sizeof(SVGAFifoCmdBlitGMRFBToScreen
);
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any none 3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->base;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->mem.start << PAGE_SHIFT;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
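/*
 * The bounce buffer grows geometrically: current size plus half of it,
 * rounded up to a page, until the incoming batch fits. Repeated
 * submissions of similar size therefore do not reallocate on every call.
 */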
3743 * vmw_execbuf_fence_commands - create and submit a command stream fence
3745 * Creates a fence object and submits a command stream marker.
3746 * If this fails for some reason, We sync the fifo and return NULL.
3747 * It is then safe to fence buffers with a NULL pointer.
3749 * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
3750 * userspace handle if @p_handle is not NULL, otherwise not.
3753 int vmw_execbuf_fence_commands(struct drm_file
*file_priv
,
3754 struct vmw_private
*dev_priv
,
3755 struct vmw_fence_obj
**p_fence
,
3760 bool synced
= false;
3762 /* p_handle implies file_priv. */
3763 BUG_ON(p_handle
!= NULL
&& file_priv
== NULL
);
3765 ret
= vmw_fifo_send_fence(dev_priv
, &sequence
);
3766 if (unlikely(ret
!= 0)) {
3767 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3771 if (p_handle
!= NULL
)
3772 ret
= vmw_user_fence_create(file_priv
, dev_priv
->fman
,
3773 sequence
, p_fence
, p_handle
);
3775 ret
= vmw_fence_create(dev_priv
->fman
, sequence
, p_fence
);
3777 if (unlikely(ret
!= 0 && !synced
)) {
3778 (void) vmw_fallback_wait(dev_priv
, false, false, sequence
,
3779 false, VMW_FENCE_WAIT_TIMEOUT
);

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
 * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: exported file descriptor for the fence. -1 if not used
 * @sync_file: Only used to clean up in case of an error in this function.
 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member is hopefully left
 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
 * will hopefully be detected.
 *
 * Also if copying fails, user-space will be unable to signal the fence object
 * so we wait for it immediately, and then unreference the user-space reference.
 */
static void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp, int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence, uint32_t fence_handle,
			    int32_t out_fence_fd, struct sync_file *sync_file)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
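	/* Only report fence details if fence creation actually succeeded. */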
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not seeing
	 * fence_rep::error filled in. Typically user-space would have pre-set
	 * that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync and unreference
	 * the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		if (sync_file)
			fput(sync_file->file);

		if (fence_rep.fd != -1) {
			put_unused_fd(fence_rep.fd);
			fence_rep.fd = -1;
		}

		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
					  TTM_REF_USAGE);
		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch pointed to
 * by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands, u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
					  sw_context->dx_ctx_node->ctx->id);
	else
		cmd = VMW_FIFO_RESERVE(dev_priv, command_size);

	if (!cmd)
		return -ENOMEM;
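
	/*
	 * Buffer-object relocations are patched into the kernel copy of the
	 * batch; the patched batch is then copied into the reserved fifo
	 * space, where resource-id relocations are applied before commit.
	 */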
	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
 * command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer represented
 * by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
				       header);
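
	/*
	 * Unlike the fifo path, the batch already lives in the command buffer
	 * manager's buffer, so both relocation passes patch it in place.
	 */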
	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * If command buffers could not be used, the function will return the value of
 * @kernel_commands on function call. That value may be NULL. In that case, the
 * value of *@header will be set to NULL.
 *
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands, u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		VMW_DEBUG_USER("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
					   header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

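	/*
	 * Copy the user-space batch into the freshly allocated command buffer;
	 * on failure the header must be freed again before returning an error.
	 */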
	ret = copy_from_user(kernel_commands, user_commands, command_size);
	if (ret) {
		VMW_DEBUG_USER("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}
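
/*
 * Look up the DX context identified by @handle (if any), add it to the
 * validation list and make it the submission's DX context node.
 */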
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	unsigned int size;
	int ret;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	res = vmw_user_resource_noref_lookup_handle
		(dev_priv, sw_context->fp->tfile, handle,
		 user_context_converter);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
			       (unsigned int) handle);
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
	if (unlikely(ret != 0))
		return ret;

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);

	return 0;
}

int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands, void *kernel_commands,
			uint32_t command_size, uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence, uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_cmdbuf_header *header;
	uint32_t handle = 0;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);

	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			VMW_DEBUG_USER("Failed to get a fence fd.\n");
			return out_fence_fd;
		}
	}

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);
		if (ret)
			goto out_free_fence_fd;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands)) {
		ret = PTR_ERR(kernel_commands);
		goto out_free_fence_fd;
	}

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
				     command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			VMW_DEBUG_USER("Failed copying commands.\n");
			goto out_unlock;
		}

		kernel_commands = sw_context->cmd_bounce;
	} else if (!header) {
		sw_context->kernel = true;
	}
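
	/* Reset the per-submission software context state. */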
	sw_context->fp = vmw_fpriv(file_priv);
	INIT_LIST_HEAD(&sw_context->ctx_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->res_relocations);
	INIT_LIST_HEAD(&sw_context->bo_relocations);

	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;

		sw_context->res_ht_initialized = true;
	}

	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	sw_context->ctx = &val_ctx;
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_reserve(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validation_res_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;
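
	/*
	 * All resources and buffers for this submission have been looked up
	 * and validated, so the duplicate-detection hash table can be dropped.
	 */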
	vmw_validation_drop_ht(&val_ctx);

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}
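
	/*
	 * Submit the patched batch: through the command buffer manager when a
	 * header was allocated, otherwise directly through the fifo.
	 */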
	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */
	if (ret != 0)
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");

	vmw_execbuf_bindings_commit(sw_context, false);
	vmw_bind_dx_query_mob(sw_context);
	vmw_validation_res_unreserve(&val_ctx, false);

	vmw_validation_bo_fence(sw_context->ctx, fence);

	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	/*
	 * If anything fails here, give up trying to export the fence and do a
	 * sync since the user mode will not be able to sync the fence itself.
	 * This ensures we are still functionally correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			VMW_DEBUG_USER("Sync file create failed for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle, out_fence_fd,
				    sync_file);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);

	return 0;
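
	/*
	 * Error paths unwind in reverse order of the setup above; each label
	 * is entered at the point matching how far submission had progressed.
	 */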
out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
	vmw_execbuf_bindings_commit(sw_context, true);
	vmw_validation_res_unreserve(&val_ctx, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	vmw_validation_drop_ht(&val_ctx);
	WARN_ON(!list_empty(&sw_context->ctx_list));
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer if the
 * normal way to do this hits an error, which should typically be extremely
 * rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
 * bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
 * query barrier that flushes all queries touching the current buffer pointed to
 * by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
 * calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct vmw_fence_obj *lfence = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;
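
	/*
	 * If a query is still outstanding, emit a dummy query as a barrier so
	 * that all pending queries touching the pinned bo finish before it is
	 * unpinned.
	 */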
	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);

out_unlock:
	return;
out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = data;
	int ret;
	struct dma_fence *in_fence = NULL;

	/*
	 * Extend the ioctl argument while maintaining backwards compatibility:
	 * We take different code paths depending on the value of arg->version.
	 *
	 * Note: The ioctl argument is extended and zeropadded by core DRM.
	 */
	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
		     arg->version == 0)) {
		VMW_DEBUG_USER("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	switch (arg->version) {
	case 1:
		/* For v1 core DRM has extended + zeropadded the data */
		arg->context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		/* For v2 and later core DRM would have correctly copied it */
		break;
	}

	/* If imported a fence FD from elsewhere, then wait on it */
	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg->imported_fence_fd);

		if (!in_fence) {
			VMW_DEBUG_USER("Cannot get imported fence\n");
			return -EINVAL;
		}

		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
		if (ret)
			goto out;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  arg->context_handle,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL, arg->flags);

	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);

	return ret;
}