drivers/gpu/drm/vmwgfx/vmwgfx_context.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state *cbs;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_resource *cotables[SVGA_COTABLE_MAX];
	spinlock_t cotable_lock;
	struct vmw_buffer_object *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
	.res_type = vmw_res_dx_context,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "dx contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_context_create,
	.destroy = vmw_dx_context_destroy,
	.bind = vmw_dx_context_bind,
	.unbind = vmw_dx_context_unbind
};

/*
 * Context management:
 */

static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
				       struct vmw_user_context *uctx)
{
	struct vmw_resource *res;
	int i;
	u32 cotable_max = has_sm5_context(dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	for (i = 0; i < cotable_max; ++i) {
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[i];
		uctx->cotables[i] = NULL;
		spin_unlock(&uctx->cotable_lock);

		if (res)
			vmw_resource_unreference(&res);
	}
}

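/**
 * vmw_hw_context_destroy - Destroy the device context backing @res.
 *
 * @res: The context resource.
 *
 * For guest-backed and DX contexts, kill all bindings and invoke the
 * resource type's destroy callback. For legacy contexts, emit an
 * SVGA_3D_CMD_CONTEXT_DESTROY command to the device FIFO.
 */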
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	if (res->func->destroy == vmw_gb_context_destroy ||
	    res->func->destroy == vmw_dx_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		vmw_cmdbuf_res_man_destroy(uctx->man);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_state_kill(uctx->cbs);
		(void) res->func->destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		vmw_context_cotables_unref(dev_priv, uctx);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_dec(dev_priv);
}

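/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether to set up a DX context rather than a guest-backed one.
 * @res: The context resource to initialize.
 * @res_free: Destructor to call on failure, or kfree() if NULL.
 *
 * Sets up the backup buffer size, the command buffer resource manager,
 * the binding state and, for DX, the cotable resources. On error, the
 * resource is freed through @res_free.
 */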
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       bool dx,
			       struct vmw_resource *res,
			       void (*res_free)(struct vmw_resource *res))
{
	int ret, i;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
			    SVGA3D_CONTEXT_DATA_SIZE);
	ret = vmw_resource_init(dev_priv, res, true,
				res_free,
				dx ? &vmw_dx_context_func :
				&vmw_gb_context_func);
	if (unlikely(ret != 0))
		goto out_err;

	if (dev_priv->has_mob) {
		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
		if (IS_ERR(uctx->man)) {
			ret = PTR_ERR(uctx->man);
			uctx->man = NULL;
			goto out_err;
		}
	}

	uctx->cbs = vmw_binding_state_alloc(dev_priv);
	if (IS_ERR(uctx->cbs)) {
		ret = PTR_ERR(uctx->cbs);
		goto out_err;
	}

	spin_lock_init(&uctx->cotable_lock);

	if (dx) {
		u32 cotable_max = has_sm5_context(dev_priv) ?
			SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
		for (i = 0; i < cotable_max; ++i) {
			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
							      &uctx->res, i);
			if (IS_ERR(uctx->cotables[i])) {
				ret = PTR_ERR(uctx->cotables[i]);
				goto out_cotables;
			}
		}
	}

	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_cotables:
	vmw_context_cotables_unref(dev_priv, uctx);
out_err:
	if (res_free)
		res_free(res);
	else
		kfree(res);
	return ret;
}

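/**
 * vmw_context_init - Initialize a context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor to call on failure, or kfree() if NULL.
 * @dx: Whether to create a DX context.
 *
 * Dispatches to vmw_gb_context_init() on guest-backed devices. Otherwise
 * defines a legacy context by reserving a hardware context id and emitting
 * an SVGA_3D_CMD_CONTEXT_DEFINE command.
 */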
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free)(struct vmw_resource *res),
			    bool dx)
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, dx, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

/*
 * GB context.
 */

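/**
 * vmw_gb_context_create - Allocate a device id and define a guest-backed
 * context on the device.
 *
 * @res: The context resource.
 */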
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

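/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backup MOB.
 *
 * @res: The context resource.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 */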
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

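/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup MOB.
 *
 * @res: The context resource.
 * @readback: Whether to read the context contents back into the backup
 * buffer before unbinding.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 *
 * Scrubs all bindings, optionally emits a readback command, binds the
 * context to SVGA3D_INVALID_ID and fences the backup buffer.
 */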
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_state_scrub(uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

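/**
 * vmw_gb_context_destroy - Destroy a guest-backed context on the device
 * and release its id.
 *
 * @res: The context resource.
 */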
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * DX context.
 */

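/**
 * vmw_dx_context_create - Allocate a device id and define a DX context on
 * the device.
 *
 * @res: The context resource.
 */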
static int vmw_dx_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

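/**
 * vmw_dx_context_bind - Bind a DX context to its backup MOB.
 *
 * @res: The context resource.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 */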
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer to be reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so scrub all bindings first so
 * that doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
				   bool readback)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
	int i;

	vmw_binding_state_scrub(uctx->cbs);
	for (i = 0; i < cotable_max; ++i) {
		struct vmw_resource *res;

		/* Avoid racing with ongoing cotable destruction. */
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[vmw_cotable_scrub_order[i]];
		if (res)
			res = vmw_resource_reference_unless_doomed(res);
		spin_unlock(&uctx->cotable_lock);
		if (!res)
			continue;

		WARN_ON(vmw_cotable_scrub(res, readback));
		vmw_resource_unreference(&res);
	}
}

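/**
 * vmw_dx_context_unbind - Unbind a DX context from its backup MOB.
 *
 * @res: The context resource.
 * @readback: Whether to read back context and query contents before
 * unbinding.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 *
 * Scrubs all cotables and bindings, reads back pending query results if
 * requested, binds the context to SVGA3D_INVALID_ID and fences the backup
 * buffer.
 */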
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_dx_context_scrub_cotables(res, readback);

	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
	    readback) {
		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
		if (vmw_query_readback_all(uctx->dx_query_mob))
			DRM_ERROR("Failed to read back query states\n");
	}

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

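/**
 * vmw_dx_context_destroy - Destroy a DX context on the device and release
 * its id.
 *
 * @res: The context resource.
 */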
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	if (ctx->cbs)
		vmw_binding_state_free(ctx->cbs);

	(void) vmw_context_bind_dx_query(res, NULL);

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

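/**
 * vmw_context_define - Common bottom half of the context define ioctls.
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_context_arg.
 * @file_priv: Identifies the caller.
 * @dx: Whether to define a DX context.
 *
 * Accounts for and allocates a struct vmw_user_context, initializes the
 * resource and sets up the user-space visible base object, returning the
 * new context id in @data.
 */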
static int vmw_context_define(struct drm_device *dev, void *data,
			      struct drm_file *file_priv, bool dx)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_operation_ctx ttm_opt_ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	int ret;

	if (!has_sm4_context(dev_priv) && dx) {
		VMW_DEBUG_USER("DX contexts not supported by device.\n");
		return -EINVAL;
	}

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
			((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   &ttm_opt_ctx);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.handle;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	return vmw_context_define(dev, data, file_priv, false);
}

int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
	struct drm_vmw_context_arg *rep = &arg->rep;

	switch (arg->req) {
	case drm_vmw_context_legacy:
		return vmw_context_define(dev, rep, file_priv, false);
	case drm_vmw_context_dx:
		return vmw_context_define(dev, rep, file_priv, true);
	default:
		break;
	}

	return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);

	return vmw_binding_state_list(uctx->cbs);
}

struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}

struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
					 SVGACOTableType cotable_type)
{
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	if (cotable_type >= cotable_max)
		return ERR_PTR(-EINVAL);

	return container_of(ctx, struct vmw_user_context, res)->
		cotables[cotable_type];
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context. If @mob is NULL, then this function will
 * remove the association between the MOB and the context. This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter. 0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
			      struct vmw_buffer_object *mob)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	if (mob == NULL) {
		if (uctx->dx_query_mob) {
			uctx->dx_query_mob->dx_query_ctx = NULL;
			vmw_bo_unreference(&uctx->dx_query_mob);
			uctx->dx_query_mob = NULL;
		}

		return 0;
	}

	/* Can only have one MOB per context for queries */
	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
		return -EINVAL;

	mob->dx_query_ctx = ctx_res;

	if (!uctx->dx_query_mob)
		uctx->dx_query_mob = vmw_bo_reference(mob);

	return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	return uctx->dx_query_mob;
}