/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

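/*
 * vmw_resource_release - kref release callback for a vmw_resource.
 *
 * Called by kref_put() with the device resource_lock write-held; removes
 * the resource id from the idr and temporarily drops the lock while the
 * hardware and software copies of the resource are destroyed.
 */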
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

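/*
 * vmw_resource_init - initialize a resource and allocate a device-unique
 * id for it from @idr. The resource starts out unavailable and is not
 * visible to lookups until vmw_resource_activate() is called.
 */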
static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */

static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

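/*
 * vmw_resource_lookup - look up an activated resource by id under the
 * resource_lock and return it with an extra reference, or NULL if the id
 * is unknown or the resource has not been activated yet.
 */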
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

/**
 * Context management:
 */

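/*
 * vmw_hw_context_destroy - hw_destroy callback for contexts: emits an
 * SVGA_3D_CMD_CONTEXT_DESTROY command through the FIFO and drops the
 * device 3D resource count.
 */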
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv);
}

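/*
 * vmw_context_init - initialize the resource, allocate a context id and
 * make the device aware of the new context by emitting an
 * SVGA_3D_CMD_CONTEXT_DEFINE command.
 */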
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
		    container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}

/**
 * Surface management.
 */

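/*
 * vmw_hw_surface_destroy - hw_destroy callback for surfaces: emits an
 * SVGA_3D_CMD_SURFACE_DESTROY command through the FIFO and drops the
 * device 3D resource count.
 */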
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv);
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}

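/*
 * vmw_surface_init - initialize the resource and emit an
 * SVGA_3D_CMD_SURFACE_DEFINE command followed by one SVGA3dSize entry per
 * mip level, converted from the drm_vmw_size array supplied by the caller.
 */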
int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	(void) vmw_3d_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}

static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}

int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

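/*
 * vmw_surface_define_ioctl - create a surface from user-space arguments.
 * The per-mip size array is copied in from user space, and a 64x64
 * A8R8G8B8 scanout surface additionally gets a snooper image allocated
 * for cursor snooping.
 */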
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_err1;
	}

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		/* allocate image area and clear it */
		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (!srf->snooper.image) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_err1;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;

	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}

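/*
 * Buffer management.
 */

/*
 * vmw_dmabuf_acc_size - estimate the TTM accounting size of a
 * vmw_dma_buffer with the given number of pages, including its page array.
 */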
static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* we must free the bo here as
		 * ttm_buffer_object_init does so as well */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->validate_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

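/*
 * vmw_dmabuf_alloc_ioctl - allocate a DMA buffer object on behalf of user
 * space and return its handle, mmap offset and current GMR id / offset in
 * the reply structure.
 */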
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;

	rep->handle = vmw_user_bo->base.hash.key;
	rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
	rep->cur_gmr_id = vmw_user_bo->base.hash.key;
	rep->cur_gmr_offset = 0;

out_no_base_object:
	ttm_bo_unref(&tmp);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

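/*
 * Stream management.
 */

/*
 * vmw_stream_destroy - hw_destroy callback for overlay streams: returns
 * the claimed stream id to the overlay code.
 */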
static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

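/*
 * vmw_stream_claim_ioctl - claim an overlay stream on behalf of user space
 * and return its resource id in the ioctl argument.
 */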
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

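/*
 * vmw_user_stream_lookup - translate a user-space stream handle into the
 * underlying overlay stream id and return the referenced resource in @out.
 */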
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}