// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions where, if
 * dx_ctx_node is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context)                                        \
({                                                                            \
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({           \
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__);   \
		__sw_context->dx_ctx_node;                                    \
	});                                                                   \
})

#define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
	struct {                                                              \
		SVGA3dCmdHeader header;                                       \
		__type body;                                                  \
	} __var

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the context resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
 * rcu-protected pointer to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		vmw_user_resource_noref_release();
		return 0;
	}

	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  dirty, (void **)&ctx_info,
					  &first_usage);
	vmw_user_resource_noref_release();
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret) {
			VMW_DEBUG_USER("Failed first usage context setup.\n");
			return ret;
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_res_cache_entry *rcache;
	enum vmw_res_type res_type = vmw_res_type(res);
	void *ptr;
	int ret;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
					  &ptr, NULL);
	if (ret)
		return ret;

	vmw_execbuf_rcache_update(rcache, res, ptr);

	return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
					    vmw_view_dirtying(view));
	if (ret)
		return ret;

	return vmw_execbuf_res_noctx_val_add(sw_context, view,
					     VMW_RES_DIRTY_NONE);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
 * to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_SET);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_noctx_val_add
				(sw_context, entry->res,
				 vmw_binding_dirtying(entry->bt));
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context holding the relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need not
 * be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}
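
/*
 * Generic command checkers: vmw_cmd_invalid unconditionally rejects a
 * command, vmw_cmd_ok accepts it without further validation.
 */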
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		res = vmw_user_resource_noref_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
		if (unlikely(ret != 0))
			return ret;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

	return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: Pointer to the resource
 *
 * The resource pointed to by @res needs to be present in the command submission
 * context's resource cache and hence the last resource of that type to be
 * processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}
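
/**
 * vmw_cmd_set_render_target_check - Validate a set-render-target command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */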
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}
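
/**
 * vmw_cmd_surface_copy_check - Validate a surface-copy command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */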
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}
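
/**
 * vmw_cmd_buffer_copy_check - Validate a DX buffer-copy command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */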
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}
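
/**
 * vmw_cmd_pred_copy_check - Validate a DX predicated copy-region command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */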
static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}
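
/**
 * vmw_cmd_stretch_blt_check - Validate a surface stretch-blt command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */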
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}
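
/**
 * vmw_cmd_blt_surf_screen_check - Validate a surface-to-screen blit command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */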
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}
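
/**
 * vmw_cmd_present_check - Validate SVGA_3D_CMD_PRESENT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */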
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If so,
 * the function prepares the state of @sw_context for switching pinned buffers
 * after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all preceding
 * queries have finished, and the old query buffer can be unpinned. However,
 * since both the new query buffer and the old one are fenced with that fence,
 * we can do an asynchronous unpin now, and be sure that the old query buffer
 * won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new - and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy queries
			 * in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate. The
 * former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the DMA buffer identified by the user-space
 * handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;

	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}
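
/**
 * vmw_cmd_dma - Validate SVGA_3D_CMD_SURFACE_DMA command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */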
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	u32 dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stays in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}
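
/**
 * vmw_cmd_draw - Validate SVGA_3D_CMD_DRAW_PRIMITIVES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */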
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
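
/**
 * vmw_cmd_tex_state - Validate a set-texture-state command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */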
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}
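
/**
 * vmw_cmd_check_define_gmrfb - Validate a GMRFB definition command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command body in the command stream.
 */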
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: The validation node representing the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper around
 * vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper around
 * vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
}

/**
 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}
/**
 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
				cmd->body.type, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&header->id),
					   vmw_res_rel_nop);
}
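/*
 * Both legacy shader commands above only take effect on guest-backed
 * devices (dev_priv->has_mob): the shader data following the command body
 * is staged through the context's shader manager, and the legacy command
 * itself is scheduled for replacement with a NOP through
 * vmw_resource_relocation_add(), since the device no longer executes it
 * directly.
 */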
/**
 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		/*
		 * This is the compat shader path - Per device guest-backed
		 * shaders, but user-space thinks it's per context host-
		 * backed shaders.
		 */
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_NONE);
			if (unlikely(ret != 0))
				return ret;

			ret = vmw_resource_relocation_add
				(sw_context, res,
				 vmw_ptr_diff(sw_context->buf_start,
					      &cmd->body.shid),
				 vmw_res_rel_normal);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}
/**
 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}
/**
 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
}
/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
			       (unsigned int) cmd->body.type,
			       (unsigned int) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);

	return 0;
}
/**
 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
		container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		VMW_DEBUG_USER("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}
/**
 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
						    VMW_RES_DIRTY_NONE);
		if (ret)
			return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}
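/*
 * As with the other DX state setters in this file, the binding above is only
 * staged in ctx_node->staged at verification time; it is committed to the
 * context (or reverted on error) by vmw_execbuf_bindings_commit() once the
 * whole batch has been validated and submitted.
 */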
2240 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2243 * @dev_priv: Pointer to a device private struct.
2244 * @sw_context: The software context being used for this batch.
2245 * @header: Pointer to the command header in the command stream.
2247 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private
*dev_priv
,
2248 struct vmw_sw_context
*sw_context
,
2249 SVGA3dCmdHeader
*header
)
2251 struct vmw_ctx_validation_info
*ctx_node
= VMW_GET_CTX_NODE(sw_context
);
2252 struct vmw_ctx_bindinfo_vb binding
;
2253 struct vmw_resource
*res
;
2255 SVGA3dCmdHeader header
;
2256 SVGA3dCmdDXSetVertexBuffers body
;
2257 SVGA3dVertexBuffer buf
[];
2264 cmd
= container_of(header
, typeof(*cmd
), header
);
2265 num
= (cmd
->header
.size
- sizeof(cmd
->body
)) /
2266 sizeof(SVGA3dVertexBuffer
);
2267 if ((u64
)num
+ (u64
)cmd
->body
.startBuffer
>
2268 (u64
)SVGA3D_DX_MAX_VERTEXBUFFERS
) {
2269 VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2273 for (i
= 0; i
< num
; i
++) {
2274 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
2276 user_surface_converter
,
2277 &cmd
->buf
[i
].sid
, &res
);
2278 if (unlikely(ret
!= 0))
2281 binding
.bi
.ctx
= ctx_node
->ctx
;
2282 binding
.bi
.bt
= vmw_ctx_binding_vb
;
2283 binding
.bi
.res
= res
;
2284 binding
.offset
= cmd
->buf
[i
].offset
;
2285 binding
.stride
= cmd
->buf
[i
].stride
;
2286 binding
.slot
= i
+ cmd
->body
.startBuffer
;
2288 vmw_binding_add(ctx_node
->staged
, &binding
.bi
, 0, binding
.slot
);
/**
 * vmw_cmd_dx_set_index_buffer - Validate
 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}
2334 * vmw_cmd_dx_set_rendertarget - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2337 * @dev_priv: Pointer to a device private struct.
2338 * @sw_context: The software context being used for this batch.
2339 * @header: Pointer to the command header in the command stream.
2341 static int vmw_cmd_dx_set_rendertargets(struct vmw_private
*dev_priv
,
2342 struct vmw_sw_context
*sw_context
,
2343 SVGA3dCmdHeader
*header
)
2345 VMW_DECLARE_CMD_VAR(*cmd
, SVGA3dCmdDXSetRenderTargets
) =
2346 container_of(header
, typeof(*cmd
), header
);
2347 u32 num_rt_view
= (cmd
->header
.size
- sizeof(cmd
->body
)) /
2348 sizeof(SVGA3dRenderTargetViewId
);
2351 if (num_rt_view
> SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS
) {
2352 VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2356 ret
= vmw_view_bindings_add(sw_context
, vmw_view_ds
, vmw_ctx_binding_ds
,
2357 0, &cmd
->body
.depthStencilViewId
, 1, 0);
2361 return vmw_view_bindings_add(sw_context
, vmw_view_rt
,
2362 vmw_ctx_binding_dx_rt
, 0, (void *)&cmd
[1],
/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
				  cmd->body.renderTargetViewId);

	return PTR_ERR_OR_ZERO(ret);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
				  cmd->body.depthStencilViewId);

	return PTR_ERR_OR_ZERO(ret);
}
2410 static int vmw_cmd_dx_view_define(struct vmw_private
*dev_priv
,
2411 struct vmw_sw_context
*sw_context
,
2412 SVGA3dCmdHeader
*header
)
2414 struct vmw_ctx_validation_info
*ctx_node
= VMW_GET_CTX_NODE(sw_context
);
2415 struct vmw_resource
*srf
;
2416 struct vmw_resource
*res
;
2417 enum vmw_view_type view_type
;
2420 * This is based on the fact that all affected define commands have the
2421 * same initial command body layout.
2424 SVGA3dCmdHeader header
;
2432 view_type
= vmw_view_cmd_to_type(header
->id
);
2433 if (view_type
== vmw_view_max
)
2436 cmd
= container_of(header
, typeof(*cmd
), header
);
2437 if (unlikely(cmd
->sid
== SVGA3D_INVALID_ID
)) {
2438 VMW_DEBUG_USER("Invalid surface id.\n");
2441 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
2442 VMW_RES_DIRTY_NONE
, user_surface_converter
,
2444 if (unlikely(ret
!= 0))
2447 res
= vmw_context_cotable(ctx_node
->ctx
, vmw_view_cotables
[view_type
]);
2448 ret
= vmw_cotable_notify(res
, cmd
->defined_id
);
2449 if (unlikely(ret
!= 0))
2452 return vmw_view_add(sw_context
->man
, ctx_node
->ctx
, srf
, view_type
,
2453 cmd
->defined_id
, header
,
2454 header
->size
+ sizeof(*header
),
2455 &sw_context
->staged_cmd_res
);
2459 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2461 * @dev_priv: Pointer to a device private struct.
2462 * @sw_context: The software context being used for this batch.
2463 * @header: Pointer to the command header in the command stream.
2465 static int vmw_cmd_dx_set_so_targets(struct vmw_private
*dev_priv
,
2466 struct vmw_sw_context
*sw_context
,
2467 SVGA3dCmdHeader
*header
)
2469 struct vmw_ctx_validation_info
*ctx_node
= VMW_GET_CTX_NODE(sw_context
);
2470 struct vmw_ctx_bindinfo_so binding
;
2471 struct vmw_resource
*res
;
2473 SVGA3dCmdHeader header
;
2474 SVGA3dCmdDXSetSOTargets body
;
2475 SVGA3dSoTarget targets
[];
2482 cmd
= container_of(header
, typeof(*cmd
), header
);
2483 num
= (cmd
->header
.size
- sizeof(cmd
->body
)) / sizeof(SVGA3dSoTarget
);
2485 if (num
> SVGA3D_DX_MAX_SOTARGETS
) {
2486 VMW_DEBUG_USER("Invalid DX SO binding.\n");
2490 for (i
= 0; i
< num
; i
++) {
2491 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
2493 user_surface_converter
,
2494 &cmd
->targets
[i
].sid
, &res
);
2495 if (unlikely(ret
!= 0))
2498 binding
.bi
.ctx
= ctx_node
->ctx
;
2499 binding
.bi
.res
= res
;
2500 binding
.bi
.bt
= vmw_ctx_binding_so
,
2501 binding
.offset
= cmd
->targets
[i
].offset
;
2502 binding
.size
= cmd
->targets
[i
].sizeInBytes
;
2505 vmw_binding_add(ctx_node
->staged
, &binding
.bi
, 0, binding
.slot
);
2511 static int vmw_cmd_dx_so_define(struct vmw_private
*dev_priv
,
2512 struct vmw_sw_context
*sw_context
,
2513 SVGA3dCmdHeader
*header
)
2515 struct vmw_ctx_validation_info
*ctx_node
= VMW_GET_CTX_NODE(sw_context
);
2516 struct vmw_resource
*res
;
2518 * This is based on the fact that all affected define commands have
2519 * the same initial command body layout.
2522 SVGA3dCmdHeader header
;
2525 enum vmw_so_type so_type
;
2531 so_type
= vmw_so_cmd_to_type(header
->id
);
2532 res
= vmw_context_cotable(ctx_node
->ctx
, vmw_so_cotables
[so_type
]);
2533 cmd
= container_of(header
, typeof(*cmd
), header
);
2534 ret
= vmw_cotable_notify(res
, cmd
->defined_id
);
2540 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2543 * @dev_priv: Pointer to a device private struct.
2544 * @sw_context: The software context being used for this batch.
2545 * @header: Pointer to the command header in the command stream.
2547 static int vmw_cmd_dx_check_subresource(struct vmw_private
*dev_priv
,
2548 struct vmw_sw_context
*sw_context
,
2549 SVGA3dCmdHeader
*header
)
2552 SVGA3dCmdHeader header
;
2554 SVGA3dCmdDXReadbackSubResource r_body
;
2555 SVGA3dCmdDXInvalidateSubResource i_body
;
2556 SVGA3dCmdDXUpdateSubResource u_body
;
2557 SVGA3dSurfaceId sid
;
2561 BUILD_BUG_ON(offsetof(typeof(*cmd
), r_body
.sid
) !=
2562 offsetof(typeof(*cmd
), sid
));
2563 BUILD_BUG_ON(offsetof(typeof(*cmd
), i_body
.sid
) !=
2564 offsetof(typeof(*cmd
), sid
));
2565 BUILD_BUG_ON(offsetof(typeof(*cmd
), u_body
.sid
) !=
2566 offsetof(typeof(*cmd
), sid
));
2568 cmd
= container_of(header
, typeof(*cmd
), header
);
2569 return vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_surface
,
2570 VMW_RES_DIRTY_NONE
, user_surface_converter
,
static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);

	if (!ctx_node)
		return -EINVAL;

	return 0;
}
/**
 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the
 * view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this command
 * batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res, &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(sw_context, view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}
2631 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2633 * @dev_priv: Pointer to a device private struct.
2634 * @sw_context: The software context being used for this batch.
2635 * @header: Pointer to the command header in the command stream.
2637 static int vmw_cmd_dx_define_shader(struct vmw_private
*dev_priv
,
2638 struct vmw_sw_context
*sw_context
,
2639 SVGA3dCmdHeader
*header
)
2641 struct vmw_ctx_validation_info
*ctx_node
= VMW_GET_CTX_NODE(sw_context
);
2642 struct vmw_resource
*res
;
2643 VMW_DECLARE_CMD_VAR(*cmd
, SVGA3dCmdDXDefineShader
) =
2644 container_of(header
, typeof(*cmd
), header
);
2650 res
= vmw_context_cotable(ctx_node
->ctx
, SVGA_COTABLE_DXSHADER
);
2651 ret
= vmw_cotable_notify(res
, cmd
->body
.shaderId
);
2655 return vmw_dx_shader_add(sw_context
->man
, ctx_node
->ctx
,
2656 cmd
->body
.shaderId
, cmd
->body
.type
,
2657 &sw_context
->staged_cmd_res
);
2661 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2663 * @dev_priv: Pointer to a device private struct.
2664 * @sw_context: The software context being used for this batch.
2665 * @header: Pointer to the command header in the command stream.
2667 static int vmw_cmd_dx_destroy_shader(struct vmw_private
*dev_priv
,
2668 struct vmw_sw_context
*sw_context
,
2669 SVGA3dCmdHeader
*header
)
2671 struct vmw_ctx_validation_info
*ctx_node
= VMW_GET_CTX_NODE(sw_context
);
2672 VMW_DECLARE_CMD_VAR(*cmd
, SVGA3dCmdDXDestroyShader
) =
2673 container_of(header
, typeof(*cmd
), header
);
2679 ret
= vmw_shader_remove(sw_context
->man
, cmd
->body
.shaderId
, 0,
2680 &sw_context
->staged_cmd_res
);
2686 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2688 * @dev_priv: Pointer to a device private struct.
2689 * @sw_context: The software context being used for this batch.
2690 * @header: Pointer to the command header in the command stream.
2692 static int vmw_cmd_dx_bind_shader(struct vmw_private
*dev_priv
,
2693 struct vmw_sw_context
*sw_context
,
2694 SVGA3dCmdHeader
*header
)
2696 struct vmw_resource
*ctx
;
2697 struct vmw_resource
*res
;
2698 VMW_DECLARE_CMD_VAR(*cmd
, SVGA3dCmdDXBindShader
) =
2699 container_of(header
, typeof(*cmd
), header
);
2702 if (cmd
->body
.cid
!= SVGA3D_INVALID_ID
) {
2703 ret
= vmw_cmd_res_check(dev_priv
, sw_context
, vmw_res_context
,
2705 user_context_converter
, &cmd
->body
.cid
,
2710 struct vmw_ctx_validation_info
*ctx_node
=
2711 VMW_GET_CTX_NODE(sw_context
);
2716 ctx
= ctx_node
->ctx
;
2719 res
= vmw_shader_lookup(vmw_context_res_man(ctx
), cmd
->body
.shid
, 0);
2721 VMW_DEBUG_USER("Could not find shader to bind.\n");
2722 return PTR_ERR(res
);
2725 ret
= vmw_execbuf_res_noctx_val_add(sw_context
, res
,
2726 VMW_RES_DIRTY_NONE
);
2728 VMW_DEBUG_USER("Error creating resource validation node.\n");
2732 return vmw_cmd_res_switch_backup(dev_priv
, sw_context
, res
,
2734 cmd
->body
.offsetInBytes
);
/**
 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_sr,
				  cmd->body.shaderResourceViewId);

	return PTR_ERR_OR_ZERO(ret);
}
/**
 * vmw_cmd_dx_transfer_from_buffer - Validate
 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
		container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.destSid, NULL);
}
/**
 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
		container_of(header, typeof(*cmd), header);

	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.surface.sid, NULL);
}
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
			       cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}
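/*
 * Non-3D (legacy 2D fifo) commands are only accepted from in-kernel callers
 * (sw_context->kernel); everything in the 3D command range is instead
 * dispatched through the per-command verifier table below.
 */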
2850 static const struct vmw_cmd_entry vmw_cmd_entries
[SVGA_3D_CMD_MAX
] = {
2851 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE
, &vmw_cmd_invalid
,
2852 false, false, false),
2853 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY
, &vmw_cmd_invalid
,
2854 false, false, false),
2855 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY
, &vmw_cmd_surface_copy_check
,
2856 true, false, false),
2857 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT
, &vmw_cmd_stretch_blt_check
,
2858 true, false, false),
2859 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA
, &vmw_cmd_dma
,
2860 true, false, false),
2861 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE
, &vmw_cmd_invalid
,
2862 false, false, false),
2863 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY
, &vmw_cmd_invalid
,
2864 false, false, false),
2865 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM
, &vmw_cmd_cid_check
,
2866 true, false, false),
2867 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE
, &vmw_cmd_cid_check
,
2868 true, false, false),
2869 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE
, &vmw_cmd_cid_check
,
2870 true, false, false),
2871 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET
,
2872 &vmw_cmd_set_render_target_check
, true, false, false),
2873 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE
, &vmw_cmd_tex_state
,
2874 true, false, false),
2875 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL
, &vmw_cmd_cid_check
,
2876 true, false, false),
2877 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA
, &vmw_cmd_cid_check
,
2878 true, false, false),
2879 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED
, &vmw_cmd_cid_check
,
2880 true, false, false),
2881 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT
, &vmw_cmd_cid_check
,
2882 true, false, false),
2883 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE
, &vmw_cmd_cid_check
,
2884 true, false, false),
2885 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR
, &vmw_cmd_cid_check
,
2886 true, false, false),
2887 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT
, &vmw_cmd_present_check
,
2888 false, false, false),
2889 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE
, &vmw_cmd_shader_define
,
2890 true, false, false),
2891 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY
, &vmw_cmd_shader_destroy
,
2892 true, false, false),
2893 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER
, &vmw_cmd_set_shader
,
2894 true, false, false),
2895 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST
, &vmw_cmd_set_shader_const
,
2896 true, false, false),
2897 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES
, &vmw_cmd_draw
,
2898 true, false, false),
2899 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT
, &vmw_cmd_cid_check
,
2900 true, false, false),
2901 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY
, &vmw_cmd_begin_query
,
2902 true, false, false),
2903 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY
, &vmw_cmd_end_query
,
2904 true, false, false),
2905 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY
, &vmw_cmd_wait_query
,
2906 true, false, false),
2907 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK
, &vmw_cmd_ok
,
2908 true, false, false),
2909 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN
,
2910 &vmw_cmd_blt_surf_screen_check
, false, false, false),
2911 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2
, &vmw_cmd_invalid
,
2912 false, false, false),
2913 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS
, &vmw_cmd_invalid
,
2914 false, false, false),
2915 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE
, &vmw_cmd_invalid
,
2916 false, false, false),
2917 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE
, &vmw_cmd_invalid
,
2918 false, false, false),
2919 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA
, &vmw_cmd_invalid
,
2920 false, false, false),
2921 VMW_CMD_DEF(SVGA_3D_CMD_DEAD1
, &vmw_cmd_invalid
,
2922 false, false, false),
2923 VMW_CMD_DEF(SVGA_3D_CMD_DEAD2
, &vmw_cmd_invalid
,
2924 false, false, false),
2925 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT
, &vmw_cmd_invalid
,
2926 false, false, false),
2927 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT
, &vmw_cmd_invalid
,
2928 false, false, false),
2929 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT
, &vmw_cmd_invalid
,
2930 false, false, false),
2931 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL
, &vmw_cmd_invalid
,
2932 false, false, false),
2933 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND
, &vmw_cmd_invalid
,
2934 false, false, false),
2935 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND
, &vmw_cmd_invalid
,
2936 false, false, false),
2937 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE
, &vmw_cmd_invalid
,
2938 false, false, true),
2939 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE
, &vmw_cmd_invalid
,
2940 false, false, true),
2941 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB
, &vmw_cmd_invalid
,
2942 false, false, true),
2943 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB
, &vmw_cmd_invalid
,
2944 false, false, true),
2945 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64
, &vmw_cmd_invalid
,
2946 false, false, true),
2947 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING
, &vmw_cmd_invalid
,
2948 false, false, true),
2949 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE
, &vmw_cmd_invalid
,
2950 false, false, true),
2951 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE
, &vmw_cmd_invalid
,
2952 false, false, true),
2953 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE
, &vmw_cmd_bind_gb_surface
,
2955 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE
, &vmw_cmd_invalid
,
2956 false, false, true),
2957 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE
, &vmw_cmd_update_gb_image
,
2959 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE
,
2960 &vmw_cmd_update_gb_surface
, true, false, true),
2961 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE
,
2962 &vmw_cmd_readback_gb_image
, true, false, true),
2963 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE
,
2964 &vmw_cmd_readback_gb_surface
, true, false, true),
2965 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE
,
2966 &vmw_cmd_invalidate_gb_image
, true, false, true),
2967 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE
,
2968 &vmw_cmd_invalidate_gb_surface
, true, false, true),
2969 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT
, &vmw_cmd_invalid
,
2970 false, false, true),
2971 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT
, &vmw_cmd_invalid
,
2972 false, false, true),
2973 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT
, &vmw_cmd_invalid
,
2974 false, false, true),
2975 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT
, &vmw_cmd_invalid
,
2976 false, false, true),
2977 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT
, &vmw_cmd_invalid
,
2978 false, false, true),
2979 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER
, &vmw_cmd_invalid
,
2980 false, false, true),
2981 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER
, &vmw_cmd_bind_gb_shader
,
2983 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER
, &vmw_cmd_invalid
,
2984 false, false, true),
2985 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64
, &vmw_cmd_invalid
,
2986 false, false, false),
2987 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY
, &vmw_cmd_begin_gb_query
,
2989 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY
, &vmw_cmd_end_gb_query
,
2991 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY
, &vmw_cmd_wait_gb_query
,
2993 VMW_CMD_DEF(SVGA_3D_CMD_NOP
, &vmw_cmd_ok
,
2995 VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR
, &vmw_cmd_ok
,
2997 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART
, &vmw_cmd_invalid
,
2998 false, false, true),
2999 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART
, &vmw_cmd_invalid
,
3000 false, false, true),
3001 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART
, &vmw_cmd_invalid
,
3002 false, false, true),
3003 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE
, &vmw_cmd_invalid
,
3004 false, false, true),
3005 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET
, &vmw_cmd_invalid
,
3006 false, false, true),
3007 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET
, &vmw_cmd_invalid
,
3008 false, false, true),
3009 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET
, &vmw_cmd_invalid
,
3010 false, false, true),
3011 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET
, &vmw_cmd_invalid
,
3012 false, false, true),
3013 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL
, &vmw_cmd_invalid
,
3014 false, false, true),
3015 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL
, &vmw_cmd_invalid
,
3016 false, false, true),
3017 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE
, &vmw_cmd_cid_check
,
3019 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA
, &vmw_cmd_invalid
,
3020 false, false, true),
3021 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH
, &vmw_cmd_invalid
,
3022 false, false, true),
3023 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE
, &vmw_cmd_invalid
,
3024 false, false, true),
3025 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2
, &vmw_cmd_invalid
,
3026 false, false, true),
3029 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT
, &vmw_cmd_invalid
,
3030 false, false, true),
3031 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT
, &vmw_cmd_invalid
,
3032 false, false, true),
3033 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT
, &vmw_cmd_invalid
,
3034 false, false, true),
3035 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT
, &vmw_cmd_invalid
,
3036 false, false, true),
3037 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT
, &vmw_cmd_invalid
,
3038 false, false, true),
3039 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER
,
3040 &vmw_cmd_dx_set_single_constant_buffer
, true, false, true),
3041 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
,
3042 &vmw_cmd_dx_set_shader_res
, true, false, true),
3043 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER
, &vmw_cmd_dx_set_shader
,
3045 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS
, &vmw_cmd_dx_cid_check
,
3047 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW
, &vmw_cmd_dx_cid_check
,
3049 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED
, &vmw_cmd_dx_cid_check
,
3051 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED
, &vmw_cmd_dx_cid_check
,
3053 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED
,
3054 &vmw_cmd_dx_cid_check
, true, false, true),
3055 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO
, &vmw_cmd_dx_cid_check
,
3057 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
,
3058 &vmw_cmd_dx_set_vertex_buffers
, true, false, true),
3059 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER
,
3060 &vmw_cmd_dx_set_index_buffer
, true, false, true),
3061 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS
,
3062 &vmw_cmd_dx_set_rendertargets
, true, false, true),
3063 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE
, &vmw_cmd_dx_cid_check
,
3065 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE
,
3066 &vmw_cmd_dx_cid_check
, true, false, true),
3067 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE
,
3068 &vmw_cmd_dx_cid_check
, true, false, true),
3069 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY
, &vmw_cmd_dx_define_query
,
3071 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY
, &vmw_cmd_dx_cid_check
,
3073 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY
, &vmw_cmd_dx_bind_query
,
3075 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET
,
3076 &vmw_cmd_dx_cid_check
, true, false, true),
3077 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY
, &vmw_cmd_dx_cid_check
,
3079 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY
, &vmw_cmd_dx_cid_check
,
3081 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY
, &vmw_cmd_invalid
,
3083 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION
, &vmw_cmd_dx_cid_check
,
3085 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS
, &vmw_cmd_dx_cid_check
,
3087 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS
, &vmw_cmd_dx_cid_check
,
3089 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW
,
3090 &vmw_cmd_dx_clear_rendertarget_view
, true, false, true),
3091 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW
,
3092 &vmw_cmd_dx_clear_depthstencil_view
, true, false, true),
3093 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY
, &vmw_cmd_invalid
,
3095 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS
, &vmw_cmd_dx_genmips
,
3097 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE
,
3098 &vmw_cmd_dx_check_subresource
, true, false, true),
3099 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE
,
3100 &vmw_cmd_dx_check_subresource
, true, false, true),
3101 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE
,
3102 &vmw_cmd_dx_check_subresource
, true, false, true),
3103 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW
,
3104 &vmw_cmd_dx_view_define
, true, false, true),
3105 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW
,
3106 &vmw_cmd_dx_view_remove
, true, false, true),
3107 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW
,
3108 &vmw_cmd_dx_view_define
, true, false, true),
3109 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW
,
3110 &vmw_cmd_dx_view_remove
, true, false, true),
3111 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW
,
3112 &vmw_cmd_dx_view_define
, true, false, true),
3113 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW
,
3114 &vmw_cmd_dx_view_remove
, true, false, true),
3115 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT
,
3116 &vmw_cmd_dx_so_define
, true, false, true),
3117 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT
,
3118 &vmw_cmd_dx_cid_check
, true, false, true),
3119 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE
,
3120 &vmw_cmd_dx_so_define
, true, false, true),
3121 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE
,
3122 &vmw_cmd_dx_cid_check
, true, false, true),
3123 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE
,
3124 &vmw_cmd_dx_so_define
, true, false, true),
3125 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE
,
3126 &vmw_cmd_dx_cid_check
, true, false, true),
3127 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE
,
3128 &vmw_cmd_dx_so_define
, true, false, true),
3129 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE
,
3130 &vmw_cmd_dx_cid_check
, true, false, true),
3131 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE
,
3132 &vmw_cmd_dx_so_define
, true, false, true),
3133 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE
,
3134 &vmw_cmd_dx_cid_check
, true, false, true),
3135 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER
,
3136 &vmw_cmd_dx_define_shader
, true, false, true),
3137 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER
,
3138 &vmw_cmd_dx_destroy_shader
, true, false, true),
3139 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER
,
3140 &vmw_cmd_dx_bind_shader
, true, false, true),
3141 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT
,
3142 &vmw_cmd_dx_so_define
, true, false, true),
3143 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT
,
3144 &vmw_cmd_dx_cid_check
, true, false, true),
3145 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT
, &vmw_cmd_dx_cid_check
,
3147 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS
,
3148 &vmw_cmd_dx_set_so_targets
, true, false, true),
3149 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT
,
3150 &vmw_cmd_dx_cid_check
, true, false, true),
3151 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY
,
3152 &vmw_cmd_dx_cid_check
, true, false, true),
3153 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY
,
3154 &vmw_cmd_buffer_copy_check
, true, false, true),
3155 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION
,
3156 &vmw_cmd_pred_copy_check
, true, false, true),
3157 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER
,
3158 &vmw_cmd_dx_transfer_from_buffer
,
3160 VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY
, &vmw_cmd_intra_surface_copy
,
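/*
 * Each VMW_CMD_DEF() entry above pairs a command id with its verifier and
 * three flags which, judging by their use in vmw_cmd_check() below, are
 * (user_allow, gb_disable, gb_enable): whether user-space may submit the
 * command, whether the command is disallowed once guest-backed objects are
 * in use, and whether it requires guest-backed support.
 */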
3164 bool vmw_cmd_describe(const void *buf
, u32
*size
, char const **cmd
)
3166 u32 cmd_id
= ((u32
*) buf
)[0];
3168 if (cmd_id
>= SVGA_CMD_MAX
) {
3169 SVGA3dCmdHeader
*header
= (SVGA3dCmdHeader
*) buf
;
3170 const struct vmw_cmd_entry
*entry
;
3172 *size
= header
->size
+ sizeof(SVGA3dCmdHeader
);
3173 cmd_id
= header
->id
;
3174 if (cmd_id
>= SVGA_3D_CMD_MAX
)
3177 cmd_id
-= SVGA_3D_CMD_BASE
;
3178 entry
= &vmw_cmd_entries
[cmd_id
];
3179 *cmd
= entry
->cmd_name
;
3184 case SVGA_CMD_UPDATE
:
3185 *cmd
= "SVGA_CMD_UPDATE";
3186 *size
= sizeof(u32
) + sizeof(SVGAFifoCmdUpdate
);
3188 case SVGA_CMD_DEFINE_GMRFB
:
3189 *cmd
= "SVGA_CMD_DEFINE_GMRFB";
3190 *size
= sizeof(u32
) + sizeof(SVGAFifoCmdDefineGMRFB
);
3192 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN
:
3193 *cmd
= "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3194 *size
= sizeof(u32
) + sizeof(SVGAFifoCmdBlitGMRFBToScreen
);
3196 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB
:
3197 *cmd
= "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3198 *size
= sizeof(u32
) + sizeof(SVGAFifoCmdBlitGMRFBToScreen
);
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->base;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
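/*
 * The bounce buffer grows geometrically (roughly by half each step, page
 * aligned) rather than to the exact requested size, so repeated submissions
 * with slowly growing batches don't pay for a vmalloc/vfree cycle each time.
 */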
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
 * userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
					 false, VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}
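/*
 * If fence object creation fails after the marker was submitted, the code
 * above falls back to a synchronous wait on the sequence, so callers can
 * safely treat a NULL fence pointer as "already idle" when fencing buffers.
 */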
3404 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3406 * @dev_priv: Pointer to a vmw_private struct.
3407 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3408 * @ret: Return value from fence object creation.
3409 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3410 * the information should be copied.
3411 * @fence: Pointer to the fenc object.
3412 * @fence_handle: User-space fence handle.
3413 * @out_fence_fd: exported file descriptor for the fence. -1 if not used
3414 * @sync_file: Only used to clean up in case of an error in this function.
3416 * This function copies fence information to user-space. If copying fails, the
3417 * user-space struct drm_vmw_fence_rep::error member is hopefully left
3418 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
3419 * will hopefully be detected.
3421 * Also if copying fails, user-space will be unable to signal the fence object
3422 * so we wait for it immediately, and then unreference the user-space reference.
3425 vmw_execbuf_copy_fence_user(struct vmw_private
*dev_priv
,
3426 struct vmw_fpriv
*vmw_fp
, int ret
,
3427 struct drm_vmw_fence_rep __user
*user_fence_rep
,
3428 struct vmw_fence_obj
*fence
, uint32_t fence_handle
,
3429 int32_t out_fence_fd
, struct sync_file
*sync_file
)
3431 struct drm_vmw_fence_rep fence_rep
;
3433 if (user_fence_rep
== NULL
)
3436 memset(&fence_rep
, 0, sizeof(fence_rep
));
3438 fence_rep
.error
= ret
;
3439 fence_rep
.fd
= out_fence_fd
;
3441 BUG_ON(fence
== NULL
);
3443 fence_rep
.handle
= fence_handle
;
3444 fence_rep
.seqno
= fence
->base
.seqno
;
3445 vmw_update_seqno(dev_priv
, &dev_priv
->fifo
);
3446 fence_rep
.passed_seqno
= dev_priv
->last_read_seqno
;
3450 * copy_to_user errors will be detected by user space not seeing
3451 * fence_rep::error filled in. Typically user-space would have pre-set
3452 * that member to -EFAULT.
3454 ret
= copy_to_user(user_fence_rep
, &fence_rep
,
3458 * User-space lost the fence object. We need to sync and unreference the
3461 if (unlikely(ret
!= 0) && (fence_rep
.error
== 0)) {
3463 fput(sync_file
->file
);
3465 if (fence_rep
.fd
!= -1) {
3466 put_unused_fd(fence_rep
.fd
);
3470 ttm_ref_object_base_unref(vmw_fp
->tfile
, fence_handle
,
3472 VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3473 (void) vmw_fence_obj_wait(fence
, false, false,
3474 VMW_FENCE_WAIT_TIMEOUT
);
3479 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3481 * @dev_priv: Pointer to a device private structure.
3482 * @kernel_commands: Pointer to the unpatched command batch.
3483 * @command_size: Size of the unpatched command batch.
3484 * @sw_context: Structure holding the relocation lists.
3486 * Side effects: If this function returns 0, then the command batch pointed to
3487 * by @kernel_commands will have been modified.
3489 static int vmw_execbuf_submit_fifo(struct vmw_private
*dev_priv
,
3490 void *kernel_commands
, u32 command_size
,
3491 struct vmw_sw_context
*sw_context
)
3495 if (sw_context
->dx_ctx_node
)
3496 cmd
= VMW_FIFO_RESERVE_DX(dev_priv
, command_size
,
3497 sw_context
->dx_ctx_node
->ctx
->id
);
3499 cmd
= VMW_FIFO_RESERVE(dev_priv
, command_size
);
3504 vmw_apply_relocations(sw_context
);
3505 memcpy(cmd
, kernel_commands
, command_size
);
3506 vmw_resource_relocations_apply(cmd
, &sw_context
->res_relocations
);
3507 vmw_resource_relocations_free(&sw_context
->res_relocations
);
3508 vmw_fifo_commit(dev_priv
, command_size
);
3514 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3515 * command buffer manager.
3517 * @dev_priv: Pointer to a device private structure.
3518 * @header: Opaque handle to the command buffer allocation.
3519 * @command_size: Size of the unpatched command batch.
3520 * @sw_context: Structure holding the relocation lists.
3522 * Side effects: If this function returns 0, then the command buffer represented
3523 * by @header will have been modified.
3525 static int vmw_execbuf_submit_cmdbuf(struct vmw_private
*dev_priv
,
3526 struct vmw_cmdbuf_header
*header
,
3528 struct vmw_sw_context
*sw_context
)
3530 u32 id
= ((sw_context
->dx_ctx_node
) ? sw_context
->dx_ctx_node
->ctx
->id
:
3532 void *cmd
= vmw_cmdbuf_reserve(dev_priv
->cman
, command_size
, id
, false,
3535 vmw_apply_relocations(sw_context
);
3536 vmw_resource_relocations_apply(cmd
, &sw_context
->res_relocations
);
3537 vmw_resource_relocations_free(&sw_context
->res_relocations
);
3538 vmw_cmdbuf_commit(dev_priv
->cman
, command_size
, header
, false);
/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to an already copied-in command batch, or NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * If command buffers could not be used, the function will return the value of
 * @kernel_commands on function call. That value may be NULL. In that case, the
 * value of *@header will be set to NULL.
 *
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands, u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		VMW_DEBUG_USER("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
					   header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands, command_size);
	if (ret) {
		VMW_DEBUG_USER("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}
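/*
 * The pointer returned above is what the caller actually patches and submits:
 * either the user batch copied into a command-buffer-manager allocation
 * (*header != NULL, submitted through the command buffer manager) or the
 * original kernel_commands pointer (*header == NULL, submitted through the
 * fifo path).
 */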
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;
	unsigned int size;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	res = vmw_user_resource_noref_lookup_handle
		(dev_priv, sw_context->fp->tfile, handle,
		 user_context_converter);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
			       (unsigned int) handle);
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
	if (unlikely(ret != 0))
		return ret;

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);

	return 0;
}
3638 int vmw_execbuf_process(struct drm_file
*file_priv
,
3639 struct vmw_private
*dev_priv
,
3640 void __user
*user_commands
, void *kernel_commands
,
3641 uint32_t command_size
, uint64_t throttle_us
,
3642 uint32_t dx_context_handle
,
3643 struct drm_vmw_fence_rep __user
*user_fence_rep
,
3644 struct vmw_fence_obj
**out_fence
, uint32_t flags
)
3646 struct vmw_sw_context
*sw_context
= &dev_priv
->ctx
;
3647 struct vmw_fence_obj
*fence
= NULL
;
3648 struct vmw_cmdbuf_header
*header
;
3649 uint32_t handle
= 0;
3651 int32_t out_fence_fd
= -1;
3652 struct sync_file
*sync_file
= NULL
;
3653 DECLARE_VAL_CONTEXT(val_ctx
, &sw_context
->res_ht
, 1);
3655 vmw_validation_set_val_mem(&val_ctx
, &dev_priv
->vvm
);
3657 if (flags
& DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD
) {
3658 out_fence_fd
= get_unused_fd_flags(O_CLOEXEC
);
3659 if (out_fence_fd
< 0) {
3660 VMW_DEBUG_USER("Failed to get a fence fd.\n");
3661 return out_fence_fd
;
3666 ret
= vmw_wait_lag(dev_priv
, &dev_priv
->fifo
.marker_queue
,
3670 goto out_free_fence_fd
;
3673 kernel_commands
= vmw_execbuf_cmdbuf(dev_priv
, user_commands
,
3674 kernel_commands
, command_size
,
3676 if (IS_ERR(kernel_commands
)) {
3677 ret
= PTR_ERR(kernel_commands
);
3678 goto out_free_fence_fd
;
3681 ret
= mutex_lock_interruptible(&dev_priv
->cmdbuf_mutex
);
3684 goto out_free_header
;
3687 sw_context
->kernel
= false;
3688 if (kernel_commands
== NULL
) {
3689 ret
= vmw_resize_cmd_bounce(sw_context
, command_size
);
3690 if (unlikely(ret
!= 0))
3693 ret
= copy_from_user(sw_context
->cmd_bounce
, user_commands
,
3695 if (unlikely(ret
!= 0)) {
3697 VMW_DEBUG_USER("Failed copying commands.\n");
3701 kernel_commands
= sw_context
->cmd_bounce
;
3702 } else if (!header
) {
3703 sw_context
->kernel
= true;
3706 sw_context
->fp
= vmw_fpriv(file_priv
);
3707 INIT_LIST_HEAD(&sw_context
->ctx_list
);
3708 sw_context
->cur_query_bo
= dev_priv
->pinned_bo
;
3709 sw_context
->last_query_ctx
= NULL
;
3710 sw_context
->needs_post_query_barrier
= false;
3711 sw_context
->dx_ctx_node
= NULL
;
3712 sw_context
->dx_query_mob
= NULL
;
3713 sw_context
->dx_query_ctx
= NULL
;
3714 memset(sw_context
->res_cache
, 0, sizeof(sw_context
->res_cache
));
3715 INIT_LIST_HEAD(&sw_context
->res_relocations
);
3716 INIT_LIST_HEAD(&sw_context
->bo_relocations
);
3718 if (sw_context
->staged_bindings
)
3719 vmw_binding_state_reset(sw_context
->staged_bindings
);
3721 if (!sw_context
->res_ht_initialized
) {
3722 ret
= drm_ht_create(&sw_context
->res_ht
, VMW_RES_HT_ORDER
);
3723 if (unlikely(ret
!= 0))
3726 sw_context
->res_ht_initialized
= true;
3729 INIT_LIST_HEAD(&sw_context
->staged_cmd_res
);
3730 sw_context
->ctx
= &val_ctx
;
3731 ret
= vmw_execbuf_tie_context(dev_priv
, sw_context
, dx_context_handle
);
3732 if (unlikely(ret
!= 0))
3735 ret
= vmw_cmd_check_all(dev_priv
, sw_context
, kernel_commands
,
3737 if (unlikely(ret
!= 0))
3740 ret
= vmw_resources_reserve(sw_context
);
3741 if (unlikely(ret
!= 0))
3744 ret
= vmw_validation_bo_reserve(&val_ctx
, true);
3745 if (unlikely(ret
!= 0))
3748 ret
= vmw_validation_bo_validate(&val_ctx
, true);
3749 if (unlikely(ret
!= 0))
3752 ret
= vmw_validation_res_validate(&val_ctx
, true);
3753 if (unlikely(ret
!= 0))
3756 vmw_validation_drop_ht(&val_ctx
);
	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;
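
	/*
	 * The commands are on their way to the device. What remains is to
	 * emit a fence, commit query and binding state, and report the fence
	 * back to user space (optionally as a sync_file fd).
	 */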
	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */
	if (ret != 0)
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");

	vmw_execbuf_bindings_commit(sw_context, false);
	vmw_bind_dx_query_mob(sw_context);
	vmw_validation_res_unreserve(&val_ctx, false);

	vmw_validation_bo_fence(sw_context->ctx, fence);

	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	/*
	 * If anything fails here, give up trying to export the fence and do a
	 * sync since the user mode will not be able to sync the fence itself.
	 * This ensures we are still functionally correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			VMW_DEBUG_USER("Sync file create failed for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle, out_fence_fd,
				    sync_file);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
	vmw_execbuf_bindings_commit(sw_context, true);
	vmw_validation_res_unreserve(&val_ctx, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	vmw_validation_drop_ht(&val_ctx);
	WARN_ON(!list_empty(&sw_context->ctx_list));
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer if the
 * normal way to do this hits an error, which should typically be extremely
 * rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
 * bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
 * query barrier that flushes all queries touching the current buffer pointed
 * to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
 * calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct vmw_fence_obj *lfence = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);

out_unlock:
	return;
out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
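
/*
 * vmw_execbuf_ioctl - Entry point for the DRM_VMW_EXECBUF ioctl.
 *
 * Checks the argument version, optionally waits on an imported fence fd, and
 * forwards the user-space command stream to vmw_execbuf_process().
 */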
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = data;
	int ret;
	struct dma_fence *in_fence = NULL;

	/*
	 * Extend the ioctl argument while maintaining backwards compatibility:
	 * We take different code paths depending on the value of arg->version.
	 *
	 * Note: The ioctl argument is extended and zeropadded by core DRM.
	 */
	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
		     arg->version == 0)) {
		VMW_DEBUG_USER("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	switch (arg->version) {
	case 1:
		/* For v1 core DRM have extended + zeropadded the data */
		arg->context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		/* For v2 and later core DRM would have correctly copied it */
		break;
	}

	/* If imported a fence FD from elsewhere, then wait on it */
	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg->imported_fence_fd);

		if (!in_fence) {
			VMW_DEBUG_USER("Cannot get imported fence\n");
			return -EINVAL;
		}

		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
		if (ret)
			goto out;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  arg->context_handle,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL, arg->flags);

	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);
	return ret;
}