/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
	uint32_t size;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

static uint64_t vmw_user_context_size;
static uint64_t vmw_user_surface_size;
static uint64_t vmw_user_stream_size;
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
static void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(res->idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id = res->id;
	struct idr *idr = res->idr;

	res->avail = false;
	if (res->remove_from_lists != NULL)
		res->remove_from_lists(res);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}
/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource from the resource manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
				 struct vmw_resource *res)
{
	int ret;

	BUG_ON(res->id != -1);

	do {
		if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(res->idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}
static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     bool delay_id,
			     void (*res_free) (struct vmw_resource *res),
			     void (*remove_from_lists)
			     (struct vmw_resource *res))
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->remove_from_lists = remove_from_lists;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;
	INIT_LIST_HEAD(&res->query_head);
	INIT_LIST_HEAD(&res->validate_head);
	res->id = -1;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(dev_priv, res);
}
/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;


	vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv, false);
}
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, false, res_free, NULL);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}
/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	kfree(ctx);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id,
		      struct vmw_resource **p_res)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
			container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
		if (p_res)
			*p_res = vmw_resource_reference(res);
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}
/**
 * Size table for the supported SVGA3D surface formats. It consists of
 * two values. The bpp value and the s_bpp value which is short for
 * "stride bits per pixel". The values are given in such a way that the
 * minimum stride for the image is calculated using
 *
 * min_stride = w*s_bpp
 *
 * and the total memory requirement for the image is
 *
 * h*min_stride*bpp/s_bpp
 *
 */
static const struct vmw_bpp vmw_sf_bpp[] = {
	[SVGA3D_FORMAT_INVALID] = {0, 0},
	[SVGA3D_X8R8G8B8] = {32, 32},
	[SVGA3D_A8R8G8B8] = {32, 32},
	[SVGA3D_R5G6B5] = {16, 16},
	[SVGA3D_X1R5G5B5] = {16, 16},
	[SVGA3D_A1R5G5B5] = {16, 16},
	[SVGA3D_A4R4G4B4] = {16, 16},
	[SVGA3D_Z_D32] = {32, 32},
	[SVGA3D_Z_D16] = {16, 16},
	[SVGA3D_Z_D24S8] = {32, 32},
	[SVGA3D_Z_D15S1] = {16, 16},
	[SVGA3D_LUMINANCE8] = {8, 8},
	[SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
	[SVGA3D_LUMINANCE16] = {16, 16},
	[SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
	[SVGA3D_DXT1] = {4, 16},
	[SVGA3D_DXT2] = {8, 32},
	[SVGA3D_DXT3] = {8, 32},
	[SVGA3D_DXT4] = {8, 32},
	[SVGA3D_DXT5] = {8, 32},
	[SVGA3D_BUMPU8V8] = {16, 16},
	[SVGA3D_BUMPL6V5U5] = {16, 16},
	[SVGA3D_BUMPX8L8V8U8] = {32, 32},
	[SVGA3D_ARGB_S10E5] = {16, 16},
	[SVGA3D_ARGB_S23E8] = {32, 32},
	[SVGA3D_A2R10G10B10] = {32, 32},
	[SVGA3D_V8U8] = {16, 16},
	[SVGA3D_Q8W8V8U8] = {32, 32},
	[SVGA3D_CxV8U8] = {16, 16},
	[SVGA3D_X8L8V8U8] = {32, 32},
	[SVGA3D_A2W10V10U10] = {32, 32},
	[SVGA3D_ALPHA8] = {8, 8},
	[SVGA3D_R_S10E5] = {16, 16},
	[SVGA3D_R_S23E8] = {32, 32},
	[SVGA3D_RG_S10E5] = {16, 16},
	[SVGA3D_RG_S23E8] = {32, 32},
	[SVGA3D_BUFFER] = {8, 8},
	[SVGA3D_Z_D24X8] = {32, 32},
	[SVGA3D_V16U16] = {32, 32},
	[SVGA3D_G16R16] = {32, 32},
	[SVGA3D_A16B16G16R16] = {64, 64},
	[SVGA3D_UYVY] = {12, 12},
	[SVGA3D_YUY2] = {12, 12},
	[SVGA3D_NV12] = {12, 8},
	[SVGA3D_AYUV] = {32, 32},
	[SVGA3D_BC4_UNORM] = {4, 16},
	[SVGA3D_BC5_UNORM] = {8, 32},
	[SVGA3D_Z_DF16] = {16, 16},
	[SVGA3D_Z_DF24] = {24, 24},
	[SVGA3D_Z_D24S8_INT] = {32, 32}
};
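
/*
 * Illustrative sketch (not part of the original driver): how the table above
 * is meant to be consumed. The minimum stride follows from s_bpp and the
 * total backing size from bpp, mirroring the arithmetic used later in
 * vmw_surface_dma_encode() and vmw_surface_define_ioctl(). The helper name
 * is hypothetical.
 */
static inline uint32_t vmw_sf_bpp_mip_size(uint32_t format, uint32_t w,
					   uint32_t h, uint32_t d)
{
	uint32_t bpp = vmw_sf_bpp[format].bpp;
	uint32_t s_bpp = vmw_sf_bpp[format].s_bpp;

	/* min_stride = w * s_bpp bits, rounded up to whole bytes. */
	uint32_t stride = (w * s_bpp + 7) >> 3;

	/* Total memory requirement: h * min_stride * bpp / s_bpp per slice. */
	return stride * h * d * bpp / s_bpp;
}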
/**
 * Surface management.
 */

struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};
/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}


/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}
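
/*
 * Illustrative sketch (not part of the original driver): the size helpers
 * above are combined when reserving FIFO space. Bringing a surface into the
 * device needs a define command optionally followed by a DMA of the backup
 * contents, while eviction needs a DMA followed by a destroy, matching
 * vmw_surface_do_validate() and vmw_surface_evict() below. The helper names
 * are hypothetical.
 */
static inline uint32_t vmw_surface_validate_submit_size(const struct vmw_surface *srf,
							bool has_backup)
{
	return vmw_surface_define_size(srf) +
		(has_backup ? vmw_surface_dma_size(srf) : 0);
}

static inline uint32_t vmw_surface_evict_submit_size(const struct vmw_surface *srf)
{
	return vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
}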
/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}
/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}
/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
	uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;

	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset = body->guest.pitch*cur_size->height*
			cur_size->depth*bpp / stride_bpp;
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	void *cmd;

	if (res->id != -1) {

		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
		if (unlikely(cmd == NULL)) {
			DRM_ERROR("Failed reserving FIFO space for surface "
				  "destruction.\n");
			return;
		}

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * used_memory_size_atomic, or separate lock
		 * to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		srf = container_of(res, struct vmw_surface, res);
		dev_priv->used_memory_size -= srf->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);

	}
	vmw_3d_resource_dec(dev_priv, false);
}
void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	if (srf->backup)
		ttm_bo_unref(&srf->backup);
	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}
/**
 * vmw_surface_do_validate - make a surface available to the device.
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, allocate one, and optionally
 * DMA the backed up surface contents to the device.
 *
 * Returns -EBUSY if there wasn't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
int vmw_surface_do_validate(struct vmw_private *dev_priv,
			    struct vmw_surface *srf)
{
	struct vmw_resource *res = &srf->res;
	struct list_head val_list;
	struct ttm_validate_buffer val_buf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Reserve- and validate the backup DMA bo.
	 */

	if (srf->backup) {
		INIT_LIST_HEAD(&val_list);
		val_buf.bo = ttm_bo_reference(srf->backup);
		val_buf.new_sync_obj_arg = (void *)((unsigned long)
						    DRM_VMW_FENCE_FLAG_EXEC);
		list_add_tail(&val_buf.head, &val_list);
		ret = ttm_eu_reserve_buffers(&val_list);
		if (unlikely(ret != 0))
			goto out_no_reserve;

		ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
				      true, false, false);
		if (unlikely(ret != 0))
			goto out_no_validate;
	}

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(dev_priv, res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}
	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define- and dma commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	if (srf->backup)
		submit_size += vmw_surface_dma_size(srf);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "validation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	if (srf->backup) {
		SVGAGuestPtr ptr;

		cmd += vmw_surface_define_size(srf);
		vmw_bo_get_guest_ptr(srf->backup, &ptr);
		vmw_surface_dma_encode(srf, cmd, &ptr, true);
	}

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	if (srf->backup) {
		struct vmw_fence_obj *fence;

		(void) vmw_execbuf_fence_commands(NULL, dev_priv,
						  &fence, NULL);
		ttm_eu_fence_buffer_objects(&val_list, fence);
		if (likely(fence != NULL))
			vmw_fence_obj_unreference(&fence);
		ttm_bo_unref(&val_buf.bo);
		ttm_bo_unref(&srf->backup);
	}

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += srf->backup_size;

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
out_no_validate:
	if (srf->backup)
		ttm_eu_backoff_reservation(&val_list);
out_no_reserve:
	if (srf->backup)
		ttm_bo_unref(&val_buf.bo);
	return ret;
}
/**
 * vmw_surface_evict - Evict a hw surface.
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to a struct vmw_surface
 *
 * DMA the contents of a hw surface to a backup guest buffer object,
 * and destroy the hw surface, releasing its id.
 */
int vmw_surface_evict(struct vmw_private *dev_priv,
		      struct vmw_surface *srf)
{
	struct vmw_resource *res = &srf->res;
	struct list_head val_list;
	struct ttm_validate_buffer val_buf;
	uint32_t submit_size;
	uint8_t *cmd;
	struct vmw_fence_obj *fence;
	SVGAGuestPtr ptr;
	int ret;

	BUG_ON(res->id == -1);

	/*
	 * Create a surface backup buffer object.
	 */

	if (!srf->backup) {
		ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
				    ttm_bo_type_device,
				    &vmw_srf_placement, 0, 0, true,
				    NULL, &srf->backup);
		if (unlikely(ret != 0))
			return ret;
	}

	/*
	 * Reserve- and validate the backup DMA bo.
	 */

	INIT_LIST_HEAD(&val_list);
	val_buf.bo = ttm_bo_reference(srf->backup);
	val_buf.new_sync_obj_arg = (void *)(unsigned long)
		DRM_VMW_FENCE_FLAG_EXEC;
	list_add_tail(&val_buf.head, &val_list);
	ret = ttm_eu_reserve_buffers(&val_list);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
			      true, false, false);
	if (unlikely(ret != 0))
		goto out_no_validate;

	/*
	 * Encode the dma- and surface destroy commands.
	 */

	submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "eviction.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_bo_get_guest_ptr(srf->backup, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, false);
	cmd += vmw_surface_dma_size(srf);
	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= srf->backup_size;

	/*
	 * Create a fence object and fence the DMA buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);
	ttm_eu_fence_buffer_objects(&val_list, fence);
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);
	ttm_bo_unref(&val_buf.bo);

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);

	return 0;

out_no_fifo:
out_no_validate:
	ttm_eu_backoff_reservation(&val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf.bo);
	ttm_bo_unref(&srf->backup);
	return ret;
}
/**
 * vmw_surface_validate - make a surface available to the device, evicting
 * other surfaces if needed.
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to a struct vmw_surface.
 *
 * Try to validate a surface and if it fails due to limited device resources,
 * repeatedly try to evict other surfaces until the request can be
 * accommodated.
 *
 * May return errors if out of resources.
 */
int vmw_surface_validate(struct vmw_private *dev_priv,
			 struct vmw_surface *srf)
{
	int ret;
	struct vmw_surface *evict_srf;

	do {
		write_lock(&dev_priv->resource_lock);
		list_del_init(&srf->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_surface_do_validate(dev_priv, srf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(&dev_priv->surface_lru)) {
			DRM_ERROR("Out of device memory for surfaces.\n");
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_srf = vmw_surface_reference
			(list_first_entry(&dev_priv->surface_lru,
					  struct vmw_surface,
					  lru_head));
		list_del_init(&evict_srf->lru_head);

		write_unlock(&dev_priv->resource_lock);
		(void) vmw_surface_evict(dev_priv, evict_srf);

		vmw_surface_unreference(&evict_srf);

	} while (1);

	if (unlikely(ret != 0 && srf->res.id != -1)) {
		write_lock(&dev_priv->resource_lock);
		list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
		write_unlock(&dev_priv->resource_lock);
	}

	return ret;
}
/**
 * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
 *
 * As part of the resource destruction, remove the surface from any
 * lookup lists.
 */
static void vmw_surface_remove_from_lists(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	list_del_init(&srf->lru_head);
}
int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(res_free == NULL);
	INIT_LIST_HEAD(&srf->lru_head);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, true, res_free,
				vmw_surface_remove_from_lists);

	if (unlikely(ret != 0)) {
		res_free(&srf->res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return ret;
}
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	if (srf->backup)
		ttm_bo_unref(&srf->backup);
	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
 * vmw_resource_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list_head: list of resources to unreserve.
 *
 * Currently only surfaces are considered, and unreserving a surface
 * means putting it back on the device's surface lru list,
 * so that it can be evicted if necessary.
 * This function traverses the resource list and
 * checks whether resources are surfaces, and in that case puts them back
 * on the device's surface LRU list.
 */
void vmw_resource_unreserve(struct list_head *list)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	rwlock_t *lock = NULL;

	list_for_each_entry(res, list, validate_head) {

		if (res->res_free != &vmw_surface_res_free &&
		    res->res_free != &vmw_user_surface_free)
			continue;

		if (unlikely(lock == NULL)) {
			lock = &res->dev_priv->resource_lock;
			write_lock(lock);
		}

		srf = container_of(res, struct vmw_surface, res);
		list_del_init(&srf->lru_head);
		list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
	}

	if (lock != NULL)
		write_unlock(lock);
}
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t stride_bpp;
	uint32_t bpp;
	uint32_t num_sizes;
	uint32_t size;
	struct vmw_master *vmaster = vmw_master(file_priv->master);

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		num_sizes += req->mip_levels[i];

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS)
		return -EINVAL;

	size = vmw_user_surface_size + 128 +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));


	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;
	srf->backup = NULL;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_no_sizes;
	}
	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
			       GFP_KERNEL);
	if (unlikely(srf->offsets == NULL)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_no_copy;
	}

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

	bpp = vmw_sf_bpp[srf->format].bpp;
	stride_bpp = vmw_sf_bpp[srf->format].s_bpp;

	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride =
				(cur_size->width * stride_bpp + 7) >> 3;

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += stride * cur_size->height *
				cur_size->depth * bpp / stride_bpp;
			++cur_offset;
			++cur_size;
		}
	}
	srf->backup_size = cur_bo_offset;

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		/* allocate image area and clear it */
		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (!srf->snooper.image) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);

	ttm_read_unlock(&vmaster->lock);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_no_sizes:
	kfree(user_srf);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}
int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;

	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}
/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* we must free the bo here as
		 * ttm_buffer_object_init does so as well */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->validate_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;

	rep->handle = vmw_user_bo->base.hash.key;
	rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
	rep->cur_gmr_id = vmw_user_bo->base.hash.key;
	rep->cur_gmr_offset = 0;

out_no_base_object:
	ttm_bo_unref(&tmp);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}
uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, false, res_free, NULL);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}
/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	kfree(stream);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */

	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " claiming.\n");
		goto out_unlock;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}