/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include <linux/idr.h>

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

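/**
 * vmw_resource_release - Final release function for a resource.
 *
 * @kref: Embedded kref of the resource whose refcount reached zero.
 *
 * Called by kref_put() with the device resource_lock write-held.
 * Removes the resource from its idr, drops the lock around the
 * hw_destroy and res_free callbacks, and reacquires it before
 * returning to vmw_resource_unreference().
 */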
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

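/**
 * vmw_resource_init - Initialize a resource and allocate its id.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res:      The resource to initialize.
 * @idr:      The idr (context, surface or stream) the id is taken from.
 * @obj_type: TTM object type of the resource.
 * @res_free: Free function, or NULL if the resource is plain kmalloc'd
 *            memory that can be kfree'd.
 *
 * The resource starts out unavailable: it cannot be found by
 * vmw_resource_lookup() until vmw_resource_activate() is called.
 */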
static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

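/**
 * vmw_resource_lookup - Look up an activated resource by id.
 *
 * Returns a referenced pointer to the resource if it is found in @idr
 * and has been activated, NULL otherwise.
 */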
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv);
}

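/**
 * vmw_context_init - Initialize a context resource and define it in
 * the device FIFO.
 *
 * Takes ownership of @res: on failure the resource is freed using
 * @res_free (or kfree'd if @res_free is NULL), so the caller must not
 * touch it afterwards.
 */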
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
		    container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}

/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv);
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}

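/**
 * vmw_surface_init - Initialize a surface resource and define it in
 * the device FIFO.
 *
 * The SVGA3dCmdDefineSurface command is followed in the FIFO by one
 * SVGA3dSize per mip level, which is why the command length is
 * computed separately from the reserved submission size.
 */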
int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	(void) vmw_3d_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}

static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}

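/**
 * vmw_user_surface_lookup_handle - Convert a user-space surface handle
 * to a referenced struct vmw_surface.
 *
 * Verifies that the base object is a surface and that the backing
 * resource is available before taking a reference on it.
 */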
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_err1;
	}

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_err1;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;

	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}

/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}

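/**
 * vmw_dmabuf_init - Initialize a vmw_dma_buffer backed by a TTM
 * buffer object.
 *
 * Accounts the object size against the TTM memory global before
 * handing off to ttm_bo_init(). On failure @bo_free is called, since
 * ttm_bo_init() destroys the buffer on error as well.
 */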
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* we must free the bo here as
		 * ttm_buffer_object_init does so as well */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->validate_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;

	rep->handle = vmw_user_bo->base.hash.key;
	rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
	rep->cur_gmr_id = vmw_user_bo->base.hash.key;
	rep->cur_gmr_offset = 0;

out_no_base_object:
	ttm_bo_unref(&tmp);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

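/**
 * vmw_dmabuf_validate_node - Assign a validation list slot to a buffer.
 *
 * Returns the slot already assigned if the buffer is on the validate
 * list; otherwise records @cur_validate_node and returns it.
 */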
uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

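/**
 * vmw_user_dmabuf_lookup - Convert a user-space buffer handle to a
 * referenced struct vmw_dma_buffer.
 *
 * The base object reference taken by the lookup is dropped again;
 * the returned buffer holds a ttm_bo reference instead.
 */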
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

/**
 * Stream management.
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

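/**
 * vmw_user_stream_lookup - Resolve a user-space stream id.
 *
 * On success, replaces *@inout_id with the device overlay stream id
 * and returns the referenced resource in @out.
 */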
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}