drivers/gpu/drm/vmwgfx/vmwgfx_context.c
/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
        struct vmw_ctx_binding_state *cbs;
        struct vmw_cmdbuf_res_manager *man;
        struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
        spinlock_t cotable_lock;
        struct vmw_dma_buffer *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
        .object_type = VMW_RES_CONTEXT,
        .base_obj_to_res = vmw_user_context_base_to_res,
        .res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
        &user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "legacy contexts",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "guest backed contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_context_create,
        .destroy = vmw_gb_context_destroy,
        .bind = vmw_gb_context_bind,
        .unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
        .res_type = vmw_res_dx_context,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "dx contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_dx_context_create,
        .destroy = vmw_dx_context_destroy,
        .bind = vmw_dx_context_bind,
        .unbind = vmw_dx_context_unbind
};

/*
 * Context management:
 */
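
/**
 * vmw_context_cotables_unref - Release a context's cotable references
 *
 * @uctx: Pointer to the user context.
 *
 * Clears each cotable pointer under the cotable_lock and drops the
 * corresponding resource reference outside of the lock.
 */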
static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
{
        struct vmw_resource *res;
        int i;

        for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[i];
                uctx->cotables[i] = NULL;
                spin_unlock(&uctx->cotable_lock);

                if (res)
                        vmw_resource_unreference(&res);
        }
}
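
/**
 * vmw_hw_context_destroy - Destroy the device context
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed and DX contexts this kills all bindings, destroys the
 * command buffer resource manager and delegates to the resource destroy
 * callback. For legacy contexts it emits a SVGA_3D_CMD_CONTEXT_DESTROY
 * command to the device FIFO.
 */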
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd;

        if (res->func->destroy == vmw_gb_context_destroy ||
            res->func->destroy == vmw_dx_context_destroy) {
                mutex_lock(&dev_priv->cmdbuf_mutex);
                vmw_cmdbuf_res_man_destroy(uctx->man);
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_state_kill(uctx->cbs);
                (void) res->func->destroy(res);
                mutex_unlock(&dev_priv->binding_mutex);
                if (dev_priv->pinned_bo != NULL &&
                    !dev_priv->query_cid_valid)
                        __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
                mutex_unlock(&dev_priv->cmdbuf_mutex);
                vmw_context_cotables_unref(uctx);
                return;
        }

        vmw_execbuf_release_pinned_bo(dev_priv);
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return;
        }

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_dec(dev_priv);
}
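
/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource
 *
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether to initialize a DX context rather than a GB context.
 * @res: The context resource to initialize.
 * @res_free: Destructor for the resource, or NULL to use kfree().
 *
 * Sets up the backup buffer size, the command buffer resource manager,
 * the binding state and, for DX contexts, the cotables.
 */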
static int vmw_gb_context_init(struct vmw_private *dev_priv,
                               bool dx,
                               struct vmw_resource *res,
                               void (*res_free)(struct vmw_resource *res))
{
        int ret, i;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
                            SVGA3D_CONTEXT_DATA_SIZE);
        ret = vmw_resource_init(dev_priv, res, true,
                                res_free,
                                dx ? &vmw_dx_context_func :
                                &vmw_gb_context_func);
        if (unlikely(ret != 0))
                goto out_err;

        if (dev_priv->has_mob) {
                uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
                if (IS_ERR(uctx->man)) {
                        ret = PTR_ERR(uctx->man);
                        uctx->man = NULL;
                        goto out_err;
                }
        }

        uctx->cbs = vmw_binding_state_alloc(dev_priv);
        if (IS_ERR(uctx->cbs)) {
                ret = PTR_ERR(uctx->cbs);
                goto out_err;
        }

        spin_lock_init(&uctx->cotable_lock);

        if (dx) {
                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                        uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
                                                              &uctx->res, i);
                        if (unlikely(IS_ERR(uctx->cotables[i]))) {
                                ret = PTR_ERR(uctx->cotables[i]);
                                goto out_cotables;
                        }
                }
        }

        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;

out_cotables:
        vmw_context_cotables_unref(uctx);
out_err:
        if (res_free)
                res_free(res);
        else
                kfree(res);
        return ret;
}
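
/**
 * vmw_context_init - Initialize a context resource
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor for the resource, or NULL to use kfree().
 * @dx: Whether the context should be a DX context.
 *
 * Dispatches to vmw_gb_context_init() on guest-backed devices. Otherwise
 * defines a legacy context directly in the device FIFO.
 */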
static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free)(struct vmw_resource *res),
                            bool dx)
{
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        if (dev_priv->has_mob)
                return vmw_gb_context_init(dev_priv, dx, res, res_free);

        ret = vmw_resource_init(dev_priv, res, false,
                                res_free, &vmw_legacy_context_func);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a resource id.\n");
                goto out_early;
        }

        if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
                DRM_ERROR("Out of hw context ids.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;

out_early:
        if (res_free == NULL)
                kfree(res);
        else
                res_free(res);
        return ret;
}

/*
 * GB context.
 */
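
/**
 * vmw_gb_context_create - Create a hardware guest-backed context
 *
 * @res: The context resource.
 *
 * The create callback of vmw_gb_context_func: allocates a context id and
 * issues a SVGA_3D_CMD_DEFINE_GB_CONTEXT command to the device.
 */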
static int vmw_gb_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "binding.\n");
                return -ENOMEM;
        }
        cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}
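
/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its MOB
 *
 * @res: The context resource.
 * @readback: Whether to issue a readback command before unbinding.
 * @val_buf: The validation buffer holding the backup buffer object.
 *
 * Scrubs all bindings, optionally reads the context state back into the
 * backup buffer, binds the context to SVGA3D_INVALID_ID and fences the
 * backup buffer.
 */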
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_binding_state_scrub(uctx->cbs);

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "unbinding.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

static int vmw_gb_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/*
 * DX context.
 */
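
/**
 * vmw_dx_context_create - Create a hardware DX context
 *
 * @res: The context resource.
 *
 * The DX counterpart of vmw_gb_context_create(), issuing a
 * SVGA_3D_CMD_DX_DEFINE_CONTEXT command instead.
 */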
static int vmw_dx_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDefineContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so scrub all bindings first so
 * that doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
                                   bool readback)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);
        int i;

        vmw_binding_state_scrub(uctx->cbs);
        for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                struct vmw_resource *res;

                /* Avoid racing with ongoing cotable destruction. */
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[vmw_cotable_scrub_order[i]];
                if (res)
                        res = vmw_resource_reference_unless_doomed(res);
                spin_unlock(&uctx->cotable_lock);
                if (!res)
                        continue;

                WARN_ON(vmw_cotable_scrub(res, readback));
                vmw_resource_unreference(&res);
        }
}
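
/**
 * vmw_dx_context_unbind - Unbind a DX context from its MOB
 *
 * @res: The context resource.
 * @readback: Whether to read back context and query state before unbinding.
 * @val_buf: The validation buffer holding the backup buffer object.
 *
 * Like vmw_gb_context_unbind(), but additionally scrubs the cotables and,
 * on readback, saves pending query states from the context's query MOB.
 */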
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_dx_context_scrub_cotables(res, readback);

        if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
            readback) {
                WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
                if (vmw_query_readback_all(uctx->dx_query_mob))
                        DRM_ERROR("Failed to read back query states\n");
        }

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "unbinding.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

static int vmw_dx_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDestroyContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/*
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_context, base)->res);
}

static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
                container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;

        if (ctx->cbs)
                vmw_binding_state_free(ctx->cbs);

        (void) vmw_context_bind_dx_query(res, NULL);

        ttm_base_object_kfree(ctx, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
                container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}
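
/**
 * vmw_context_destroy_ioctl - Ioctl to destroy a user-space context handle
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg.
 * @file_priv: The calling file private.
 *
 * Drops the usage reference user space holds on the base object; the
 * underlying resource goes away once all references are released.
 */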
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}
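
/**
 * vmw_context_define - Define a new context and a user-space handle for it
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg.
 * @file_priv: The calling file private.
 * @dx: Whether to create a DX context.
 *
 * Accounts the context against the graphics memory limit, initializes the
 * resource and sets up the TTM base object whose handle is returned in
 * @data.
 */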
static int vmw_context_define(struct drm_device *dev, void *data,
                              struct drm_file *file_priv, bool dx)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct ttm_operation_ctx ttm_opt_ctx = {
                .interruptible = true,
                .no_wait_gpu = false
        };
        int ret;

        if (!dev_priv->has_dx && dx) {
                DRM_ERROR("DX contexts not supported by device.\n");
                return -EINVAL;
        }

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of contexts anyway.
         */

        if (unlikely(vmw_user_context_size == 0))
                vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
                  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_context_size,
                                   &ttm_opt_ctx);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for context"
                                  " creation.\n");
                goto out_unlock;
        }

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (unlikely(!ctx)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_context_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = ctx->base.hash.key;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        return vmw_context_define(dev, data, file_priv, false);
}

int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
{
        union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
        struct drm_vmw_context_arg *rep = &arg->rep;

        switch (arg->req) {
        case drm_vmw_context_legacy:
                return vmw_context_define(dev, rep, file_priv, false);
        case drm_vmw_context_dx:
                return vmw_context_define(dev, rep, file_priv, true);
        default:
                break;
        }

        return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);

        return vmw_binding_state_list(uctx->cbs);
}

struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->man;
}

struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
                                         SVGACOTableType cotable_type)
{
        if (cotable_type >= SVGA_COTABLE_DX10_MAX)
                return ERR_PTR(-EINVAL);

        return vmw_resource_reference
                (container_of(ctx, struct vmw_user_context, res)->
                 cotables[cotable_type]);
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context. If @mob is NULL, then this function will
 * remove the association between the MOB and the context. This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter. 0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
                              struct vmw_dma_buffer *mob)
{
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);

        if (mob == NULL) {
                if (uctx->dx_query_mob) {
                        uctx->dx_query_mob->dx_query_ctx = NULL;
                        vmw_dmabuf_unreference(&uctx->dx_query_mob);
                        uctx->dx_query_mob = NULL;
                }

                return 0;
        }

        /* Can only have one MOB per context for queries */
        if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
                return -EINVAL;

        mob->dx_query_ctx = ctx_res;

        if (!uctx->dx_query_mob)
                uctx->dx_query_mob = vmw_dmabuf_reference(mob);

        return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);

        return uctx->dx_query_mob;
}