/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

#define VMW_RES_HT_ORDER 12
/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of 4 byte entries into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};
/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @switch_backup: Boolean whether to switch backup buffer on unreserve.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: Resources do not need to allocate buffer backup on
 * reservation. The command stream will provide one.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	unsigned long new_backup_offset;
	bool first_usage;
	bool no_buffer_needed;
};
/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: list of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *new_backup =
			backoff ? NULL : val->new_backup;

		vmw_resource_unreserve(res, new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}
/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	list_add_tail(&node->head, &sw_context->resource_list);
	node->res = vmw_resource_reference(res);
	node->first_usage = true;

	if (unlikely(p_node != NULL))
		*p_node = node;

	return 0;
}
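/*
 * Note: the resource pointer itself is used as the hash key above, so
 * repeated references to the same resource within one submission resolve
 * to the same val_node and only take a single reference.
 */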
/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}
/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}
/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head)
		cb[rel->offset] = rel->res->id;
}
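/*
 * Since relocation offsets are stored with 4-byte granularity, applying a
 * relocation is a single indexed 32-bit store of the resource's device id
 * into the (bounce) copy of the command buffer.
 */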
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}
/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct ttm_buffer_object *bo,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) bo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->reserved = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
	}

	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}
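/*
 * Buffer objects are deduplicated through the same sw_context->res_ht hash
 * table used for resources, keyed on the buffer object pointer, and the
 * number of distinct buffers per submission is capped at
 * VMWGFX_MAX_VALIDATIONS.
 */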
/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct ttm_buffer_object *bo = &res->backup->base;

			ret = vmw_bo_to_validate_list
				(sw_context, bo, NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}
	return 0;
}
/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}
	}
	return 0;
}
/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 */
static int vmw_cmd_res_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     enum vmw_res_type res_type,
			     const struct vmw_user_resource_conv *converter,
			     uint32_t *id,
			     struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id == SVGA3D_INVALID_ID)
		return 0;

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->tfile,
					      *id,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id);
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id;

	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id - sw_context->buf_start);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}
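/*
 * The per-type res_cache entry above short-circuits the common case where
 * consecutive commands reference the same user-space handle: the handle is
 * looked up once, and subsequent hits only add a relocation entry.
 */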
/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, NULL);
	return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
		return -EPERM;
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
		return -EPERM;
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}
/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct ttm_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new - and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin(dev_priv->pinned_bo, false);
			ttm_bo_unref(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */
			vmw_bo_pin(dev_priv->dummy_query_bo, true);
			dev_priv->dummy_query_bo_pinned = true;

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				ttm_bo_reference(sw_context->cur_query_bo);
		}
	}
}
/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}
/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stays in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}
/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}
typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
	[cmd - SVGA_3D_CMD_BASE] = func
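/*
 * VMW_CMD_DEF() expands to a designated array initializer, so the table
 * below is indexed by (command id - SVGA_3D_CMD_BASE), matching the lookup
 * performed in vmw_cmd_check().
 */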
static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_err;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}
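/*
 * Note that sw_context->buf_start is recorded before parsing so that the
 * relocation offsets computed as (id - sw_context->buf_start) in
 * vmw_cmd_res_check() index correctly into the copy that is later patched
 * by vmw_resource_relocations_apply().
 */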
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}
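/*
 * Guest pointers into VRAM-placed buffers are expressed as an offset into
 * the special SVGA_GMR_FRAMEBUFFER region, while GMR-placed buffers simply
 * use the GMR id kept in bo->mem.start.
 */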
/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);
		kfree(val);
	}
}
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * Don't validate pinned buffers.
	 */

	if (bo == dev_priv->pinned_bo ||
	    (bo == dev_priv->dummy_query_bo &&
	     dev_priv->dummy_query_bo_pinned))
		return 0;

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
	return ret;
}
static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
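/*
 * The bounce buffer grows geometrically (roughly 1.5x, rounded up to a page
 * boundary) and the old contents are simply discarded; this is fine because
 * the buffer is refilled from user space by copy_from_user() on every
 * submission.
 */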
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence,
					    DRM_VMW_FENCE_FLAG_EXEC,
					    p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence,
				       DRM_VMW_FENCE_FLAG_EXEC,
				       p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}
/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object so we wait for it immediately, and then unreference the
 * user-space reference.
 */
static void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
					  false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	void *cmd;
	int ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	if (kernel_commands == NULL) {
		sw_context->kernel = false;

		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else
		sw_context->kernel = true;

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	sw_context->fence_flags = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}

	INIT_LIST_HEAD(&resource_list);
	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (unlikely(ret != 0))
			goto out_err;
	}

	cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);

	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);

	vmw_fifo_commit(dev_priv, command_size);

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resource_list_unreserve(&sw_context->resource_list, false);
	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);

	return 0;

out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);

	vmw_resource_list_unreserve(&sw_context->resource_list, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);

	return ret;
}
/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;
}
/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * the @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
	list_add_tail(&query_val.head, &validate_list);

	do {
		ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
	} while (ret == -ERESTARTSYS);

	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;

	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);
}
/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * This will allow us to extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg->version.
	 */

	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		DRM_ERROR("You're running outdated experimental "
			  "vmwgfx user-space drivers.");
		return -EINVAL;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL);

	if (unlikely(ret != 0))
		goto out_unlock;

	vmw_kms_cursor_post_execbuf(dev_priv);

out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}