/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"
struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
        struct vmw_ctx_binding_state cbs;
        struct vmw_cmdbuf_res_manager *man;
};

typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
                                            bool rebind);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static uint64_t vmw_user_context_size;
static const struct vmw_user_resource_conv user_context_conv = {
        .object_type = VMW_RES_CONTEXT,
        .base_obj_to_res = vmw_user_context_base_to_res,
        .res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
        &user_context_conv;
static const struct vmw_res_func vmw_legacy_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "legacy contexts",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};
static const struct vmw_res_func vmw_gb_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "guest backed contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_context_create,
        .destroy = vmw_gb_context_destroy,
        .bind = vmw_gb_context_bind,
        .unbind = vmw_gb_context_unbind
};
static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
        [vmw_ctx_binding_shader] = vmw_context_scrub_shader,
        [vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
        [vmw_ctx_binding_tex] = vmw_context_scrub_texture };
/**
 * Context management:
 */
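/**
 * vmw_hw_context_destroy - Destroy a hardware context.
 *
 * @res: Pointer to the context resource.
 *
 * Resource destructor. For guest-backed contexts, the command buffer
 * resource manager and all tracked bindings are torn down before the
 * context is destroyed on the device. For legacy contexts, a
 * SVGA_3D_CMD_CONTEXT_DESTROY command is emitted through the FIFO.
 */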
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd;


        if (res->func->destroy == vmw_gb_context_destroy) {
                mutex_lock(&dev_priv->cmdbuf_mutex);
                vmw_cmdbuf_res_man_destroy(uctx->man);
                mutex_lock(&dev_priv->binding_mutex);
                (void) vmw_context_binding_state_kill(&uctx->cbs);
                (void) vmw_gb_context_destroy(res);
                mutex_unlock(&dev_priv->binding_mutex);
                if (dev_priv->pinned_bo != NULL &&
                    !dev_priv->query_cid_valid)
                        __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
                mutex_unlock(&dev_priv->cmdbuf_mutex);
                return;
        }

        vmw_execbuf_release_pinned_bo(dev_priv);
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "destruction.\n");
                return;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_3d_resource_dec(dev_priv, false);
}
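/**
 * vmw_gb_context_init - Initialize a guest-backed context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor to call on error, or NULL to use kfree().
 *
 * Sets the backup buffer size, creates the command buffer resource
 * manager on MOB-capable hardware, initializes the binding state tracker
 * and activates the resource.
 */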
static int vmw_gb_context_init(struct vmw_private *dev_priv,
                               struct vmw_resource *res,
                               void (*res_free) (struct vmw_resource *res))
{
        int ret;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        ret = vmw_resource_init(dev_priv, res, true,
                                res_free, &vmw_gb_context_func);
        res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
        if (unlikely(ret != 0))
                goto out_err;

        if (dev_priv->has_mob) {
                uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
                if (unlikely(IS_ERR(uctx->man))) {
                        ret = PTR_ERR(uctx->man);
                        uctx->man = NULL;
                        goto out_err;
                }
        }

        memset(&uctx->cbs, 0, sizeof(uctx->cbs));
        INIT_LIST_HEAD(&uctx->cbs.list);

        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;

out_err:
        if (res_free)
                res_free(res);
        else
                kfree(res);
        return ret;
}
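/**
 * vmw_context_init - Initialize a context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor to call on error, or NULL to use kfree().
 *
 * Dispatches to vmw_gb_context_init() on guest-backed hardware.
 * Otherwise defines a legacy context on the device through the FIFO.
 */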
static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free) (struct vmw_resource *res))
{
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        if (dev_priv->has_mob)
                return vmw_gb_context_init(dev_priv, res, res_free);

        ret = vmw_resource_init(dev_priv, res, false,
                                res_free, &vmw_legacy_context_func);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a resource id.\n");
                goto out_early;
        }

        if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
                DRM_ERROR("Out of hw context ids.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        (void) vmw_3d_resource_inc(dev_priv, false);
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;

out_early:
        if (res_free == NULL)
                kfree(res);
        else
                res_free(res);
        return ret;
}
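/**
 * vmw_context_alloc - Allocate and initialize a context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns the new context resource on success, NULL on failure.
 */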
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
        struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
        int ret;

        if (unlikely(res == NULL))
                return NULL;

        ret = vmw_context_init(dev_priv, res, NULL);

        return (ret == 0) ? res : NULL;
}
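/**
 * vmw_gb_context_create - Create a guest-backed context on the device.
 *
 * @res: The context resource.
 *
 * Allocates a device context id and emits a SVGA_3D_CMD_DEFINE_GB_CONTEXT
 * command. A no-op if the resource already has an id.
 */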
static int vmw_gb_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        (void) vmw_3d_resource_inc(dev_priv, false);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}
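/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backup buffer.
 *
 * @res: The context resource.
 * @val_buf: Validation buffer holding the backup buffer object.
 *
 * Emits a SVGA_3D_CMD_BIND_GB_CONTEXT command binding the context to
 * the MOB backing @val_buf->bo.
 */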
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}
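/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup
 * buffer.
 *
 * @res: The context resource.
 * @readback: Whether to issue a readback command before unbinding.
 * @val_buf: Validation buffer holding the backup buffer object.
 *
 * Scrubs all context bindings, optionally reads the context state back
 * to the backup buffer, unbinds the MOB and fences the backup buffer.
 */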
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;


        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_context_binding_state_scrub(&uctx->cbs);

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "unbinding.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}
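/**
 * vmw_gb_context_destroy - Destroy a guest-backed context on the device.
 *
 * @res: The context resource.
 *
 * Emits a SVGA_3D_CMD_DESTROY_GB_CONTEXT command and releases the device
 * context id. A no-op if the resource has no id.
 */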
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_3d_resource_dec(dev_priv, false);

        return 0;
}
/**
 * User-space context management:
 */
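/**
 * vmw_user_context_base_to_res - Return the resource embedded in a
 * user context.
 *
 * @base: Pointer to the TTM base object.
 */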
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_context, base)->res);
}
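/**
 * vmw_user_context_free - Free a user context and release its
 * accounted memory.
 *
 * @res: The resource embedded in the user context.
 */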
static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
            container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;

        ttm_base_object_kfree(ctx, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_context_size);
}
/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
            container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}
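/**
 * vmw_context_destroy_ioctl - Ioctl to destroy a user-space context handle.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_context_arg identifying the context.
 * @file_priv: Pointer to the calling file private.
 */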
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}
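/**
 * vmw_context_define_ioctl - Ioctl to create a new user-space context.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_context_arg. Receives the new
 * context id on success.
 * @file_priv: Pointer to the calling file private.
 */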
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of contexts anyway.
         */

        if (unlikely(vmw_user_context_size == 0))
                vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
                  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_context_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for context"
                                  " creation.\n");
                goto out_unlock;
        }

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (unlikely(ctx == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_context_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = ctx->base.hash.key;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}
/**
 * vmw_context_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetShader body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_SET_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = bi->ctx->id;
        cmd->body.type = bi->i1.shader_type;
        cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}
/**
 * vmw_context_scrub_render_target - scrub a render target binding
 * from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
                                           bool rebind)
{
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for render target "
                          "unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = bi->ctx->id;
        cmd->body.type = bi->i1.rt_type;
        cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        cmd->body.target.face = 0;
        cmd->body.target.mipmap = 0;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}
/**
 * vmw_context_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
                                     bool rebind)
{
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                struct {
                        SVGA3dCmdSetTextureState c;
                        SVGA3dTextureState s1;
                } body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for texture "
                          "unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.c.cid = bi->ctx->id;
        cmd->body.s1.stage = bi->i1.texture_stage;
        cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
        cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}
/**
 * vmw_context_binding_drop: Stop tracking a context binding
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */
static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
{
        list_del(&cb->ctx_list);
        if (!list_empty(&cb->res_list))
                list_del(&cb->res_list);
        cb->bi.ctx = NULL;
}
/**
 * vmw_context_binding_add: Start tracking a context binding
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Performs basic checks on the binding to make sure arguments are within
 * bounds and then starts tracking the binding in the context binding
 * state structure @cbs.
 */
int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
                            const struct vmw_ctx_bindinfo *bi)
{
        struct vmw_ctx_binding *loc;

        switch (bi->bt) {
        case vmw_ctx_binding_rt:
                if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
                        DRM_ERROR("Illegal render target type %u.\n",
                                  (unsigned) bi->i1.rt_type);
                        return -EINVAL;
                }
                loc = &cbs->render_targets[bi->i1.rt_type];
                break;
        case vmw_ctx_binding_tex:
                if (unlikely((unsigned)bi->i1.texture_stage >=
                             SVGA3D_NUM_TEXTURE_UNITS)) {
                        DRM_ERROR("Illegal texture/sampler unit %u.\n",
                                  (unsigned) bi->i1.texture_stage);
                        return -EINVAL;
                }
                loc = &cbs->texture_units[bi->i1.texture_stage];
                break;
        case vmw_ctx_binding_shader:
                if (unlikely((unsigned)bi->i1.shader_type >=
                             SVGA3D_SHADERTYPE_MAX)) {
                        DRM_ERROR("Illegal shader type %u.\n",
                                  (unsigned) bi->i1.shader_type);
                        return -EINVAL;
                }
                loc = &cbs->shaders[bi->i1.shader_type];
                break;
        default:
                BUG();
        }

        if (loc->bi.ctx != NULL)
                vmw_context_binding_drop(loc);

        loc->bi = *bi;
        loc->bi.scrubbed = false;
        list_add_tail(&loc->ctx_list, &cbs->list);
        INIT_LIST_HEAD(&loc->res_list);

        return 0;
}
/**
 * vmw_context_binding_transfer: Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @bi: Information about the binding to track.
 */
static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
                                         const struct vmw_ctx_bindinfo *bi)
{
        struct vmw_ctx_binding *loc;

        switch (bi->bt) {
        case vmw_ctx_binding_rt:
                loc = &cbs->render_targets[bi->i1.rt_type];
                break;
        case vmw_ctx_binding_tex:
                loc = &cbs->texture_units[bi->i1.texture_stage];
                break;
        case vmw_ctx_binding_shader:
                loc = &cbs->shaders[bi->i1.shader_type];
                break;
        default:
                BUG();
        }

        if (loc->bi.ctx != NULL)
                vmw_context_binding_drop(loc);

        if (bi->res != NULL) {
                loc->bi = *bi;
                list_add_tail(&loc->ctx_list, &cbs->list);
                list_add_tail(&loc->res_list, &bi->res->binding_head);
        }
}
/**
 * vmw_context_binding_kill - Kill a binding on the device
 * and stop tracking it.
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Emits FIFO commands to scrub a binding represented by @cb.
 * Then stops tracking the binding and re-initializes its storage.
 */
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
        if (!cb->bi.scrubbed) {
                (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
                cb->bi.scrubbed = true;
        }
        vmw_context_binding_drop(cb);
}
/**
 * vmw_context_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker. Then re-initializes the whole structure.
 */
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_binding *entry, *next;

        list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
                vmw_context_binding_kill(entry);
}
/**
 * vmw_context_binding_state_scrub - Scrub all bindings associated with a
 * struct vmw_ctx_binding state structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker.
 */
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_binding *entry;

        list_for_each_entry(entry, &cbs->list, ctx_list) {
                if (!entry->bi.scrubbed) {
                        (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
                        entry->bi.scrubbed = true;
                }
        }
}
/**
 * vmw_context_binding_res_list_kill - Kill all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Kills all bindings associated with a specific resource. Typically
 * called before the resource is destroyed.
 */
void vmw_context_binding_res_list_kill(struct list_head *head)
{
        struct vmw_ctx_binding *entry, *next;

        list_for_each_entry_safe(entry, next, head, res_list)
                vmw_context_binding_kill(entry);
}
/**
 * vmw_context_binding_res_list_scrub - Scrub all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Scrub all bindings associated with a specific resource. Typically
 * called before the resource is evicted.
 */
void vmw_context_binding_res_list_scrub(struct list_head *head)
{
        struct vmw_ctx_binding *entry;

        list_for_each_entry(entry, head, res_list) {
                if (!entry->bi.scrubbed) {
                        (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
                        entry->bi.scrubbed = true;
                }
        }
}
/**
 * vmw_context_binding_state_transfer - Commit staged binding info
 *
 * @ctx: Pointer to context to commit the staged binding info to.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure to the persistent
 * structure in the context. This can be done once commands have been
 * submitted to hardware.
 */
void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
                                        struct vmw_ctx_binding_state *from)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);
        struct vmw_ctx_binding *entry, *next;

        list_for_each_entry_safe(entry, next, &from->list, ctx_list)
                vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}
/**
 * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
 *
 * @ctx: The context resource
 *
 * Walks through the context binding list and rebinds all scrubbed
 * resource bindings.
 */
int vmw_context_rebind_all(struct vmw_resource *ctx)
{
        struct vmw_ctx_binding *entry;
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);
        struct vmw_ctx_binding_state *cbs = &uctx->cbs;
        int ret;

        list_for_each_entry(entry, &cbs->list, ctx_list) {
                if (likely(!entry->bi.scrubbed))
                        continue;

                if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
                            SVGA3D_INVALID_ID))
                        continue;

                ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
                if (unlikely(ret != 0))
                        return ret;

                entry->bi.scrubbed = false;
        }

        return 0;
}
/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
        return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}
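/**
 * vmw_context_res_man - Return the command buffer resource manager of a
 * context.
 *
 * @ctx: The context resource.
 */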
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->man;
}