/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};
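/*
 * Each user-visible object above embeds a struct ttm_base_object (which
 * carries the per-file handle and its refcount) alongside the driver-side
 * object, so the helpers below can convert a TTM buffer object pointer
 * back to the wrapping vmwgfx structure with container_of() and no extra
 * lookup or allocation.
 */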
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}
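/*
 * vmw_resource_release() runs as the kref release callback and is entered
 * with dev_priv->resource_lock held for writing (see
 * vmw_resource_unreference() below); it drops the lock around the
 * hw_destroy/res_free callbacks and retakes it before returning to kref_put().
 */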
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}
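/*
 * vmw_resource_init() sets up the refcount and callbacks of a new resource
 * and reserves an id for it in the given idr. The resource is not yet
 * visible to vmw_resource_lookup(); that only happens once
 * vmw_resource_activate() marks it available.
 */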
static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}
/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
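/*
 * Look up a resource by id in the given idr and return it with an extra
 * kref held, or NULL if it does not exist or has not been activated yet.
 * The caller is expected to drop the reference with
 * vmw_resource_unreference().
 */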
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
/**
 * Context management:
 */
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);

	return (ret == 0) ? res : NULL;
}
/**
 * User-space context management:
 */
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}
/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}
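/*
 * Creating a user-space context follows the same three-step pattern used
 * for surfaces and streams below: initialize the hardware resource, take
 * an extra resource reference on behalf of the base object, then register
 * the base object in the caller's handle table. Once the base object is
 * initialized, its release callback owns that extra reference.
 */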
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
		    container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}
/**
 * Surface management.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}
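/*
 * The surface define command is submitted as a single FIFO reservation:
 * an SVGA3dCmdHeader plus SVGA3dCmdDefineSurface, immediately followed by
 * one SVGA3dSize entry per mip level across all faces (srf->num_sizes),
 * which is why submit_size and cmd_len are computed separately below.
 */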
int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}
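/*
 * Translate a user-space surface handle into a vmw_surface pointer with an
 * extra resource reference held. The base-object lookup pins the object
 * while the resource state is checked under resource_lock; the base-object
 * reference itself is dropped again before returning, on both the success
 * and the failure path.
 */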
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0))
		goto out_err1;

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_err1;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0))
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}
int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;

	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}
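/**
 * Buffer management.
 *
 * DMA buffers are TTM buffer objects wrapped in a struct vmw_dma_buffer;
 * the helpers below handle their size accounting, their GMR binding state
 * and the user-space handles that refer to them.
 */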
static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}
void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
	    container_of(bo->bdev, struct vmw_private, bdev);

	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
		vmw_bo->gmr_bound = false;
	}
}
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}
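/*
 * Initialize a driver-allocated vmw_dma_buffer and hand it to TTM. The
 * accounted size is charged to the memory global before ttm_bo_init() is
 * called; like ttm_bo_init() itself, this function calls bo_free() on its
 * failure path, so the caller never has to clean up a half-built object.
 */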
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* we must free the bo here as
		 * ttm_buffer_object_init does so as well */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
	INIT_LIST_HEAD(&vmw_bo->validate_list);
	vmw_bo->gmr_id = 0;
	vmw_bo->gmr_bound = false;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	rep->handle = vmw_user_bo->base.hash.key;
	rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
	rep->cur_gmr_id = vmw_user_bo->base.hash.key;
	rep->cur_gmr_offset = 0;

out_no_base_object:
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}
*dev
, void *data
,
902 struct drm_file
*file_priv
)
904 struct drm_vmw_unref_dmabuf_arg
*arg
=
905 (struct drm_vmw_unref_dmabuf_arg
*)data
;
907 return ttm_ref_object_base_unref(vmw_fpriv(file_priv
)->tfile
,
uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}
void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}
uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo;

	if (bo->mem.mem_type == TTM_PL_VRAM)
		return SVGA_GMR_FRAMEBUFFER;

	vmw_bo = vmw_dma_buffer(bo);

	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}
void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->gmr_bound = true;
	vmw_bo->gmr_id = id;
}
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out of VRAM.
 */

int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
	struct ttm_bo_global *glob = dev_priv->bdev.glob;
	int id;
	int ret;

	do {
		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
			return -ENOMEM;

		spin_lock(&glob->lru_lock);
		ret = ida_get_new(&dev_priv->gmr_ida, &id);
		spin_unlock(&glob->lru_lock);
	} while (ret == -EAGAIN);

	if (unlikely(ret != 0))
		return ret;

	if (unlikely(id >= dev_priv->max_gmr_ids)) {
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, id);
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	*p_id = (uint32_t) id;

	return 0;
}
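/**
 * Stream management.
 *
 * Streams are overlay video resources; like contexts and surfaces they are
 * reference-counted vmw_resources, but their hardware id is claimed from
 * and returned to the overlay code rather than programmed through the FIFO.
 */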
static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}
/**
 * User-space stream management:
 */
static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}
/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}
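/*
 * Claiming a stream mirrors vmw_context_define_ioctl(): the stream resource
 * is initialized first, an extra resource reference is taken for the base
 * object, and the stream id returned to user space is the resource id
 * allocated by vmw_resource_init().
 */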
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}
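/*
 * Resolve a user-space stream id to its vmw_resource, checking that the
 * resource really is a user stream owned by the calling file. On success
 * *inout_id is rewritten to the overlay stream id and the resource is
 * returned referenced; the caller drops it with vmw_resource_unreference().
 */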
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}