/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}
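
/*
 * The wrapper structs above embed the object they extend, so a pointer
 * to the embedded member converts back to the wrapper with
 * container_of(). A minimal sketch of the pattern (hypothetical type):
 *
 *	struct wrapper {
 *		struct ttm_buffer_object base;
 *		int extra;
 *	};
 *
 *	struct wrapper *w = container_of(bo, struct wrapper, base);
 *
 * The conversion is only valid when @bo is known to be embedded in the
 * wrapper, which the lookup paths below guarantee by checking
 * object_type first.
 */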
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}
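
/*
 * Locking note: vmw_resource_release() runs from kref_put() with
 * dev_priv->resource_lock write-held. It removes the id from the idr
 * first, then drops the lock around hw_destroy()/res_free(), both of
 * which may sleep (e.g. when reserving FIFO space), and re-acquires it
 * before returning so that vmw_resource_unreference() can unlock.
 */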
static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}
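
/*
 * The loop above is the two-step idr protocol of this kernel
 * generation: idr_pre_get() preallocates with GFP_KERNEL outside the
 * lock, and idr_get_new_above() is retried whenever a concurrent
 * allocation has consumed the preallocation (-EAGAIN). Ids start at 1
 * so that 0 remains available as an invalid id.
 */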
/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
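
/*
 * A successful lookup returns the resource with an extra reference
 * held. A minimal caller sketch (hypothetical context id "cid"):
 *
 *	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, cid);
 *	if (unlikely(res == NULL))
 *		return -EINVAL;
 *	... use res ...
 *	vmw_resource_unreference(&res);
 */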
/**
 * Context management:
 */
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
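
/*
 * All device commands below follow this FIFO pattern: vmw_fifo_reserve()
 * returns a pointer into command space, the caller fills in an
 * SVGA3dCmdHeader (little-endian command id plus body size in bytes)
 * followed by the command body, and vmw_fifo_commit() hands the bytes
 * to the virtual device.
 */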
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}
/**
 * User-space context management:
 */
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}
/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}
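
/*
 * Reference flow in the define ioctl: vmw_context_init() leaves the
 * resource with a single reference owned by this function. The extra
 * reference taken with vmw_resource_reference() is handed to the ttm
 * base object and dropped later by vmw_user_context_base_release(),
 * while the function's own reference is dropped on exit. On success
 * exactly one reference, the base object's, remains.
 */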
int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
		    container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}
/**
 * Surface management.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}
int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}
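
/*
 * Wire layout of the variable-length SURFACE_DEFINE command built
 * above (a sketch of what vmw_surface_init() emits, not extra state):
 *
 *	SVGA3dCmdHeader        header;    command id + cmd_len
 *	SVGA3dCmdDefineSurface body;      sid, flags, format, mip counts
 *	SVGA3dSize             sizes[N];  one entry per mip level
 *
 * cmd_len deliberately excludes the header itself, while submit_size,
 * the amount of FIFO space reserved and committed, includes it.
 */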
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_err1;
	}
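
	/*
	 * A 64x64 A8R8G8B8 surface with flag bit 9 set is treated as a
	 * cursor surface: a CPU-side copy ("snooper image") is kept so
	 * cursor updates can be snooped. The meaning of bit 9 is
	 * inferred from this check; the flag is passed through from
	 * user space unmodified.
	 */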
	if (srf->flags & (1 << 9) &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_err1;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}
int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;

	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}
/**
 * Buffer management.
 */
static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}
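
/*
 * Worked example of the accounting above, assuming 4096-byte pages and
 * 8-byte pointers: a 1 MiB buffer spans 256 pages, so its page array
 * needs 256 * 8 = 2048 bytes, rounded up to one 4096-byte page, which
 * is charged on top of the fixed per-bo overhead computed once into
 * bo_user_size.
 */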
void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
	    container_of(bo->bdev, struct vmw_private, bdev);

	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
		vmw_bo->gmr_bound = false;
	}
}
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* we must free the bo here as
		 * ttm_buffer_object_init does so as well */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
	INIT_LIST_HEAD(&vmw_bo->validate_list);
	vmw_bo->gmr_id = 0;
	vmw_bo->gmr_bound = false;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}
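
/*
 * Note: vmw_dmabuf_init() takes ownership of @vmw_bo on all paths. On
 * early failure it calls @bo_free itself, and ttm_bo_init() likewise
 * invokes the destroy callback on error, so callers must never free
 * the buffer again after a failed call.
 */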
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	/* vmw_dmabuf_init() frees vmw_user_bo itself on failure. */
	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0)) {
		ttm_read_unlock(&vmaster->lock);
		return ret;
	}

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0)) {
		struct ttm_buffer_object *bo = &vmw_user_bo->dma.base;

		/*
		 * Drop the reference the base object would have owned;
		 * the fall-through unref of @tmp then destroys the bo.
		 */
		ttm_bo_unref(&bo);
	} else {
		rep->handle = vmw_user_bo->base.hash.key;
		rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
		rep->cur_gmr_id = vmw_user_bo->base.hash.key;
		rep->cur_gmr_offset = 0;
	}
	ttm_bo_unref(&tmp);

	ttm_read_unlock(&vmaster->lock);

	return ret;
}
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}
uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}
void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}
uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo;

	if (bo->mem.mem_type == TTM_PL_VRAM)
		return SVGA_GMR_FRAMEBUFFER;

	vmw_bo = vmw_dma_buffer(bo);

	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}
void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->gmr_bound = true;
	vmw_bo->gmr_id = id;
}
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out of VRAM.
 */

int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
	struct ttm_bo_global *glob = dev_priv->bdev.glob;
	int id;
	int ret;

	do {
		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
			return -ENOMEM;

		spin_lock(&glob->lru_lock);
		ret = ida_get_new(&dev_priv->gmr_ida, &id);
		spin_unlock(&glob->lru_lock);
	} while (ret == -EAGAIN);

	if (unlikely(ret != 0))
		return ret;

	if (unlikely(id >= dev_priv->max_gmr_ids)) {
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, id);
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	*p_id = (uint32_t) id;
	return 0;
}
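
/*
 * Like the idr loop in vmw_resource_init(), this is the two-step ida
 * protocol: ida_pre_get() preallocates outside the spinlock and
 * ida_get_new() is retried on -EAGAIN. Ids at or above max_gmr_ids are
 * released again immediately, since the device cannot address them;
 * the resulting -EBUSY makes the caller place the buffer in VRAM
 * instead (see the TODO above).
 */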
/**
 * Stream management
 */
static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}
/**
 * User-space stream management:
 */
static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}
/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr,
				  arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}