/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"

struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
};

struct vmw_user_surface {
        struct ttm_base_object base;
        struct vmw_surface srf;
        uint32_t size;
};

struct vmw_user_dma_buffer {
        struct ttm_base_object base;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

struct vmw_stream {
        struct vmw_resource res;
        uint32_t stream_id;
};

struct vmw_user_stream {
        struct ttm_base_object base;
        struct vmw_stream stream;
};

struct vmw_surface_offset {
        uint32_t face;
        uint32_t mip;
        uint32_t bo_offset;
};

static uint64_t vmw_user_context_size;
static uint64_t vmw_user_surface_size;
static uint64_t vmw_user_stream_size;

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
static void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(res->idr, res->id);
        res->id = -1;
        write_unlock(&dev_priv->resource_lock);
}
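
/*
 * vmw_resource_release - kref callback that tears down a resource.
 *
 * Called via kref_put() from vmw_resource_unreference(), i.e. with the
 * device's resource_lock write-held. The lock is dropped around the
 * hw_destroy and res_free callbacks and re-taken before the id is
 * removed from the idr.
 */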
static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id = res->id;
        struct idr *idr = res->idr;

        res->avail = false;
        if (res->remove_from_lists != NULL)
                res->remove_from_lists(res);
        write_unlock(&dev_priv->resource_lock);

        if (likely(res->hw_destroy != NULL))
                res->hw_destroy(res);

        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);

        if (id != -1)
                idr_remove(idr, id);
}
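
/*
 * vmw_resource_unreference - drop a reference and clear the caller's pointer.
 * The final reference is released under resource_lock so that lookups cannot
 * race with teardown.
 */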
void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;
        struct vmw_private *dev_priv = res->dev_priv;

        *p_res = NULL;
        write_lock(&dev_priv->resource_lock);
        kref_put(&res->kref, vmw_resource_release);
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource from the resource manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
                                 struct vmw_resource *res)
{
        int ret;

        BUG_ON(res->id != -1);

        do {
                if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
                        return -ENOMEM;

                write_lock(&dev_priv->resource_lock);
                ret = idr_get_new_above(res->idr, res, 1, &res->id);
                write_unlock(&dev_priv->resource_lock);

        } while (ret == -EAGAIN);

        return ret;
}
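
/*
 * vmw_resource_init - initialize the part of a resource that is common to
 * contexts, surfaces and streams. When @delay_id is true no hardware id is
 * allocated here; the caller allocates one later (surfaces do this at
 * validation time).
 */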
static int vmw_resource_init(struct vmw_private *dev_priv,
                             struct vmw_resource *res,
                             struct idr *idr,
                             enum ttm_object_type obj_type,
                             bool delay_id,
                             void (*res_free) (struct vmw_resource *res),
                             void (*remove_from_lists)
                             (struct vmw_resource *res))
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->remove_from_lists = remove_from_lists;
        res->res_type = obj_type;
        res->idr = idr;
        res->avail = false;
        res->dev_priv = dev_priv;
        INIT_LIST_HEAD(&res->query_head);
        INIT_LIST_HEAD(&res->validate_head);
        res->id = -1;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(dev_priv, res);
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
static void vmw_resource_activate(struct vmw_resource *res,
                                  void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}
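
/*
 * vmw_resource_lookup - look up an activated resource by id, returning it
 * with an extra reference, or NULL if it doesn't exist or hasn't been
 * activated yet.
 */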
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
                                         struct idr *idr, int id)
{
        struct vmw_resource *res;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(idr, id);
        if (res && res->avail)
                kref_get(&res->kref);
        else
                res = NULL;
        read_unlock(&dev_priv->resource_lock);

        if (unlikely(res == NULL))
                return NULL;

        return res;
}

/**
 * Context management:
 */
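
/*
 * vmw_hw_context_destroy - hw_destroy callback for contexts: emits a
 * SVGA_3D_CMD_CONTEXT_DESTROY command and drops the device's 3D resource
 * count.
 */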
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd;

        vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_3d_resource_dec(dev_priv, false);
}
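
/*
 * vmw_context_init - allocate a context id and emit the
 * SVGA_3D_CMD_CONTEXT_DEFINE command that creates the context on the host.
 * On early failure the context memory is handed back to @res_free (or kfree).
 */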
static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free) (struct vmw_resource *res))
{
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
                                VMW_RES_CONTEXT, false, res_free, NULL);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a resource id.\n");
                goto out_early;
        }

        if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
                DRM_ERROR("Out of hw context ids.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        (void) vmw_3d_resource_inc(dev_priv, false);
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;

out_early:
        if (res_free == NULL)
                kfree(res);
        else
                res_free(res);
        return ret;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
        struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
        int ret;

        if (unlikely(res == NULL))
                return NULL;

        ret = vmw_context_init(dev_priv, res, NULL);
        return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
            container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;

        kfree(ctx);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
            container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_context *ctx;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret = 0;

        res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_context_free) {
                ret = -EINVAL;
                goto out;
        }

        ctx = container_of(res, struct vmw_user_context, res);
        if (ctx->base.tfile != tfile && !ctx->base.shareable) {
                ret = -EPERM;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of contexts anyway.
         */

        if (unlikely(vmw_user_context_size == 0))
                vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_context_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for context"
                                  " creation.\n");
                goto out_unlock;
        }

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (unlikely(ctx == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_context_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = res->id;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      int id,
                      struct vmw_resource **p_res)
{
        struct vmw_resource *res;
        int ret = 0;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(&dev_priv->context_idr, id);
        if (res && res->avail) {
                struct vmw_user_context *ctx =
                        container_of(res, struct vmw_user_context, res);
                if (ctx->base.tfile != tfile && !ctx->base.shareable)
                        ret = -EPERM;
                if (p_res)
                        *p_res = vmw_resource_reference(res);
        } else
                ret = -EINVAL;
        read_unlock(&dev_priv->resource_lock);

        return ret;
}

struct vmw_bpp {
        uint8_t bpp;
        uint8_t s_bpp;
};

/*
 * Size table for the supported SVGA3D surface formats. It consists of
 * two values: the bpp value and the s_bpp value, which is short for
 * "stride bits per pixel." The values are given in such a way that the
 * minimum stride for the image is calculated using
 *
 * min_stride = w*s_bpp
 *
 * and the total memory requirement for the image is
 *
 * h*min_stride*bpp/s_bpp
 */
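
/*
 * Worked example of the formula above: a 64x64 SVGA3D_A8R8G8B8 mip level
 * (bpp = s_bpp = 32) has min_stride = 64*32/8 = 256 bytes and needs
 * 64*256*32/32 = 16384 bytes, while a 64x64 SVGA3D_DXT1 level
 * (bpp = 4, s_bpp = 16) has min_stride = 64*16/8 = 128 bytes and needs
 * 64*128*4/16 = 2048 bytes.
 */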
static const struct vmw_bpp vmw_sf_bpp[] = {
        [SVGA3D_FORMAT_INVALID] = {0, 0},
        [SVGA3D_X8R8G8B8] = {32, 32},
        [SVGA3D_A8R8G8B8] = {32, 32},
        [SVGA3D_R5G6B5] = {16, 16},
        [SVGA3D_X1R5G5B5] = {16, 16},
        [SVGA3D_A1R5G5B5] = {16, 16},
        [SVGA3D_A4R4G4B4] = {16, 16},
        [SVGA3D_Z_D32] = {32, 32},
        [SVGA3D_Z_D16] = {16, 16},
        [SVGA3D_Z_D24S8] = {32, 32},
        [SVGA3D_Z_D15S1] = {16, 16},
        [SVGA3D_LUMINANCE8] = {8, 8},
        [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
        [SVGA3D_LUMINANCE16] = {16, 16},
        [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
        [SVGA3D_DXT1] = {4, 16},
        [SVGA3D_DXT2] = {8, 32},
        [SVGA3D_DXT3] = {8, 32},
        [SVGA3D_DXT4] = {8, 32},
        [SVGA3D_DXT5] = {8, 32},
        [SVGA3D_BUMPU8V8] = {16, 16},
        [SVGA3D_BUMPL6V5U5] = {16, 16},
        [SVGA3D_BUMPX8L8V8U8] = {32, 32},
        [SVGA3D_ARGB_S10E5] = {16, 16},
        [SVGA3D_ARGB_S23E8] = {32, 32},
        [SVGA3D_A2R10G10B10] = {32, 32},
        [SVGA3D_V8U8] = {16, 16},
        [SVGA3D_Q8W8V8U8] = {32, 32},
        [SVGA3D_CxV8U8] = {16, 16},
        [SVGA3D_X8L8V8U8] = {32, 32},
        [SVGA3D_A2W10V10U10] = {32, 32},
        [SVGA3D_ALPHA8] = {8, 8},
        [SVGA3D_R_S10E5] = {16, 16},
        [SVGA3D_R_S23E8] = {32, 32},
        [SVGA3D_RG_S10E5] = {16, 16},
        [SVGA3D_RG_S23E8] = {32, 32},
        [SVGA3D_BUFFER] = {8, 8},
        [SVGA3D_Z_D24X8] = {32, 32},
        [SVGA3D_V16U16] = {32, 32},
        [SVGA3D_G16R16] = {32, 32},
        [SVGA3D_A16B16G16R16] = {64, 64},
        [SVGA3D_UYVY] = {12, 12},
        [SVGA3D_YUY2] = {12, 12},
        [SVGA3D_NV12] = {12, 8},
        [SVGA3D_AYUV] = {32, 32},
        [SVGA3D_BC4_UNORM] = {4, 16},
        [SVGA3D_BC5_UNORM] = {8, 32},
        [SVGA3D_Z_DF16] = {16, 16},
        [SVGA3D_Z_DF24] = {24, 24},
        [SVGA3D_Z_D24S8_INT] = {32, 32}
};

/**
 * Surface management.
 */

struct vmw_surface_dma {
        SVGA3dCmdHeader header;
        SVGA3dCmdSurfaceDMA body;
        SVGA3dCopyBox cb;
        SVGA3dCmdSurfaceDMASuffix suffix;
};

struct vmw_surface_define {
        SVGA3dCmdHeader header;
        SVGA3dCmdDefineSurface body;
};

struct vmw_surface_destroy {
        SVGA3dCmdHeader header;
        SVGA3dCmdDestroySurface body;
};

/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
        return srf->num_sizes * sizeof(struct vmw_surface_dma);
}

/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
        return sizeof(struct vmw_surface_define) + srf->num_sizes *
                sizeof(SVGA3dSize);
}

/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
        return sizeof(struct vmw_surface_destroy);
}

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
                                       void *cmd_space)
{
        struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
                cmd_space;

        cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
                                      void *cmd_space)
{
        struct vmw_surface_define *cmd = (struct vmw_surface_define *)
                cmd_space;
        struct drm_vmw_size *src_size;
        SVGA3dSize *cmd_size;
        uint32_t cmd_len;
        int i;

        cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

        cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
        cmd->header.size = cmd_len;
        cmd->body.sid = srf->res.id;
        cmd->body.surfaceFlags = srf->flags;
        cmd->body.format = cpu_to_le32(srf->format);
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
                cmd->body.face[i].numMipLevels = srf->mip_levels[i];

        cmd += 1;
        cmd_size = (SVGA3dSize *) cmd;
        src_size = srf->sizes;

        for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
                cmd_size->width = src_size->width;
                cmd_size->height = src_size->height;
                cmd_size->depth = src_size->depth;
        }
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
                                   void *cmd_space,
                                   const SVGAGuestPtr *ptr,
                                   bool to_surface)
{
        uint32_t i;
        uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
        uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
        struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;

        for (i = 0; i < srf->num_sizes; ++i) {
                SVGA3dCmdHeader *header = &cmd->header;
                SVGA3dCmdSurfaceDMA *body = &cmd->body;
                SVGA3dCopyBox *cb = &cmd->cb;
                SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
                const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
                const struct drm_vmw_size *cur_size = &srf->sizes[i];

                header->id = SVGA_3D_CMD_SURFACE_DMA;
                header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

                body->guest.ptr = *ptr;
                body->guest.ptr.offset += cur_offset->bo_offset;
                body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
                body->host.sid = srf->res.id;
                body->host.face = cur_offset->face;
                body->host.mipmap = cur_offset->mip;
                body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
                                  SVGA3D_READ_HOST_VRAM);
                cb->x = 0;
                cb->y = 0;
                cb->z = 0;
                cb->srcx = 0;
                cb->srcy = 0;
                cb->srcz = 0;
                cb->w = cur_size->width;
                cb->h = cur_size->height;
                cb->d = cur_size->depth;

                suffix->suffixSize = sizeof(*suffix);
                suffix->maximumOffset = body->guest.pitch*cur_size->height*
                        cur_size->depth*bpp / stride_bpp;
                suffix->flags.discard = 0;
                suffix->flags.unsynchronized = 0;
                suffix->flags.reserved = 0;
                ++cmd;
        }
}
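
/*
 * vmw_hw_surface_destroy - hw_destroy callback for surfaces: if the surface
 * still owns a hardware id, emit a surface destroy command and give its
 * backup size back to the device's used_memory_size accounting, then drop
 * the 3D resource count.
 */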
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_surface *srf;
        void *cmd;

        if (res->id != -1) {

                cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
                if (unlikely(cmd == NULL)) {
                        DRM_ERROR("Failed reserving FIFO space for surface "
                                  "destruction.\n");
                        return;
                }

                vmw_surface_destroy_encode(res->id, cmd);
                vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

                /*
                 * used_memory_size_atomic, or separate lock
                 * to avoid taking dev_priv::cmdbuf_mutex in
                 * the destroy path.
                 */

                mutex_lock(&dev_priv->cmdbuf_mutex);
                srf = container_of(res, struct vmw_surface, res);
                dev_priv->used_memory_size -= srf->backup_size;
                mutex_unlock(&dev_priv->cmdbuf_mutex);
        }
        vmw_3d_resource_dec(dev_priv, false);
}

void vmw_surface_res_free(struct vmw_resource *res)
{
        struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

        if (srf->backup)
                ttm_bo_unref(&srf->backup);
        kfree(srf->offsets);
        kfree(srf->sizes);
        kfree(srf->snooper.image);
        kfree(srf);
}

/**
 * vmw_surface_do_validate - make a surface available to the device.
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, allocate one, and optionally
 * DMA the backed up surface contents to the device.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
int vmw_surface_do_validate(struct vmw_private *dev_priv,
                            struct vmw_surface *srf)
{
        struct vmw_resource *res = &srf->res;
        struct list_head val_list;
        struct ttm_validate_buffer val_buf;
        uint32_t submit_size;
        uint8_t *cmd;
        int ret;

        if (likely(res->id != -1))
                return 0;

        if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
                     dev_priv->memory_size))
                return -EBUSY;

        /*
         * Reserve- and validate the backup DMA bo.
         */

        if (srf->backup) {
                INIT_LIST_HEAD(&val_list);
                val_buf.bo = ttm_bo_reference(srf->backup);
                val_buf.new_sync_obj_arg = (void *)((unsigned long)
                                                    DRM_VMW_FENCE_FLAG_EXEC);
                list_add_tail(&val_buf.head, &val_list);
                ret = ttm_eu_reserve_buffers(&val_list);
                if (unlikely(ret != 0))
                        goto out_no_reserve;

                ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
                                      true, false, false);
                if (unlikely(ret != 0))
                        goto out_no_validate;
        }

        /*
         * Alloc id for the resource.
         */

        ret = vmw_resource_alloc_id(dev_priv, res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a surface id.\n");
                goto out_no_id;
        }
        if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        /*
         * Encode surface define- and dma commands.
         */

        submit_size = vmw_surface_define_size(srf);
        if (srf->backup)
                submit_size += vmw_surface_dma_size(srf);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "validation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        vmw_surface_define_encode(srf, cmd);
        if (srf->backup) {
                SVGAGuestPtr ptr;

                cmd += vmw_surface_define_size(srf);
                vmw_bo_get_guest_ptr(srf->backup, &ptr);
                vmw_surface_dma_encode(srf, cmd, &ptr, true);
        }

        vmw_fifo_commit(dev_priv, submit_size);

        /*
         * Create a fence object and fence the backup buffer.
         */

        if (srf->backup) {
                struct vmw_fence_obj *fence;

                (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                                  &fence, NULL);
                ttm_eu_fence_buffer_objects(&val_list, fence);
                if (likely(fence != NULL))
                        vmw_fence_obj_unreference(&fence);
                ttm_bo_unref(&val_buf.bo);
                ttm_bo_unref(&srf->backup);
        }

        /*
         * Surface memory usage accounting.
         */

        dev_priv->used_memory_size += srf->backup_size;

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
out_no_validate:
        if (srf->backup)
                ttm_eu_backoff_reservation(&val_list);
out_no_reserve:
        if (srf->backup)
                ttm_bo_unref(&val_buf.bo);
        return ret;
}

/**
 * vmw_surface_evict - Evict a hw surface.
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to a struct vmw_surface
 *
 * DMA the contents of a hw surface to a backup guest buffer object,
 * and destroy the hw surface, releasing its id.
 */
int vmw_surface_evict(struct vmw_private *dev_priv,
                      struct vmw_surface *srf)
{
        struct vmw_resource *res = &srf->res;
        struct list_head val_list;
        struct ttm_validate_buffer val_buf;
        uint32_t submit_size;
        uint8_t *cmd;
        int ret;
        struct vmw_fence_obj *fence;
        SVGAGuestPtr ptr;

        BUG_ON(res->id == -1);

        /*
         * Create a surface backup buffer object.
         */

        if (!srf->backup) {
                ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
                                    ttm_bo_type_device,
                                    &vmw_srf_placement, 0, 0, true,
                                    NULL, &srf->backup);
                if (unlikely(ret != 0))
                        return ret;
        }

        /*
         * Reserve- and validate the backup DMA bo.
         */

        INIT_LIST_HEAD(&val_list);
        val_buf.bo = ttm_bo_reference(srf->backup);
        val_buf.new_sync_obj_arg = (void *)(unsigned long)
                DRM_VMW_FENCE_FLAG_EXEC;
        list_add_tail(&val_buf.head, &val_list);
        ret = ttm_eu_reserve_buffers(&val_list);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
                              true, false, false);
        if (unlikely(ret != 0))
                goto out_no_validate;

        /*
         * Encode the dma- and surface destroy commands.
         */

        submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "eviction.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        vmw_bo_get_guest_ptr(srf->backup, &ptr);
        vmw_surface_dma_encode(srf, cmd, &ptr, false);
        cmd += vmw_surface_dma_size(srf);
        vmw_surface_destroy_encode(res->id, cmd);
        vmw_fifo_commit(dev_priv, submit_size);

        /*
         * Surface memory usage accounting.
         */

        dev_priv->used_memory_size -= srf->backup_size;

        /*
         * Create a fence object and fence the DMA buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);
        ttm_eu_fence_buffer_objects(&val_list, fence);
        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);
        ttm_bo_unref(&val_buf.bo);

        /*
         * Release the surface ID.
         */

        vmw_resource_release_id(res);

        return 0;

out_no_fifo:
out_no_validate:
        ttm_eu_backoff_reservation(&val_list);
out_no_reserve:
        ttm_bo_unref(&val_buf.bo);
        ttm_bo_unref(&srf->backup);
        return ret;
}

/**
 * vmw_surface_validate - make a surface available to the device, evicting
 * other surfaces if needed.
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to a struct vmw_surface.
 *
 * Try to validate a surface and if it fails due to limited device resources,
 * repeatedly try to evict other surfaces until the request can be
 * satisfied.
 *
 * May return errors if out of resources.
 */
int vmw_surface_validate(struct vmw_private *dev_priv,
                         struct vmw_surface *srf)
{
        int ret;
        struct vmw_surface *evict_srf;

        do {
                write_lock(&dev_priv->resource_lock);
                list_del_init(&srf->lru_head);
                write_unlock(&dev_priv->resource_lock);

                ret = vmw_surface_do_validate(dev_priv, srf);
                if (likely(ret != -EBUSY))
                        break;

                write_lock(&dev_priv->resource_lock);
                if (list_empty(&dev_priv->surface_lru)) {
                        DRM_ERROR("Out of device memory for surfaces.\n");
                        ret = -EBUSY;
                        write_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_srf = vmw_surface_reference
                        (list_first_entry(&dev_priv->surface_lru,
                                          struct vmw_surface,
                                          lru_head));
                list_del_init(&evict_srf->lru_head);

                write_unlock(&dev_priv->resource_lock);
                (void) vmw_surface_evict(dev_priv, evict_srf);

                vmw_surface_unreference(&evict_srf);

        } while (1);

        if (unlikely(ret != 0 && srf->res.id != -1)) {
                write_lock(&dev_priv->resource_lock);
                list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
                write_unlock(&dev_priv->resource_lock);
        }

        return ret;
}

/**
 * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
 *
 * As part of the resource destruction, remove the surface from any
 * lookup lists.
 */
static void vmw_surface_remove_from_lists(struct vmw_resource *res)
{
        struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

        list_del_init(&srf->lru_head);
}

int vmw_surface_init(struct vmw_private *dev_priv,
                     struct vmw_surface *srf,
                     void (*res_free) (struct vmw_resource *res))
{
        int ret;
        struct vmw_resource *res = &srf->res;

        BUG_ON(res_free == NULL);
        INIT_LIST_HEAD(&srf->lru_head);
        ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
                                VMW_RES_SURFACE, true, res_free,
                                vmw_surface_remove_from_lists);

        if (unlikely(ret != 0))
                res_free(res);

        /*
         * The surface won't be visible to hardware until a
         * surface validate.
         */

        (void) vmw_3d_resource_inc(dev_priv, false);
        vmw_resource_activate(res, vmw_hw_surface_destroy);
        return ret;
}
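
/*
 * vmw_user_surface_free - res_free callback for user-space surfaces: frees
 * the backup buffer, size and offset arrays and the cursor snooper image,
 * and returns the accounted size to the ttm memory global.
 */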
static void vmw_user_surface_free(struct vmw_resource *res)
{
        struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
        struct vmw_user_surface *user_srf =
            container_of(srf, struct vmw_user_surface, srf);
        struct vmw_private *dev_priv = srf->res.dev_priv;
        uint32_t size = user_srf->size;

        if (srf->backup)
                ttm_bo_unref(&srf->backup);
        kfree(srf->offsets);
        kfree(srf->sizes);
        kfree(srf->snooper.image);
        kfree(user_srf);
        ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_resource_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: list of resources to unreserve.
 *
 * Currently only surfaces are considered: unreserving a surface means
 * putting it back on the device's surface LRU list, so that it can be
 * evicted if necessary. This function traverses the resource list and,
 * for each surface found, puts it back on that list.
 */
void vmw_resource_unreserve(struct list_head *list)
{
        struct vmw_resource *res;
        struct vmw_surface *srf;
        rwlock_t *lock = NULL;

        list_for_each_entry(res, list, validate_head) {

                if (res->res_free != &vmw_surface_res_free &&
                    res->res_free != &vmw_user_surface_free)
                        continue;

                if (unlikely(lock == NULL)) {
                        lock = &res->dev_priv->resource_lock;
                        write_lock(lock);
                }

                srf = container_of(res, struct vmw_surface, res);
                list_del_init(&srf->lru_head);
                list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
        }

        if (lock != NULL)
                write_unlock(lock);
}

/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed to by @out_surf and @out_buf must be NULL on entry.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_dma_buffer **out_buf)
{
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
        if (!ret)
                return 0;

        ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
        return ret;
}

int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
                                   struct ttm_object_file *tfile,
                                   uint32_t handle, struct vmw_surface **out)
{
        struct vmw_resource *res;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct ttm_base_object *base;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_resource;

        user_srf = container_of(base, struct vmw_user_surface, base);
        srf = &user_srf->srf;
        res = &srf->res;

        read_lock(&dev_priv->resource_lock);

        if (!res->avail || res->res_free != &vmw_user_surface_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *out = srf;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_surface *user_srf =
            container_of(base, struct vmw_user_surface, base);
        struct vmw_resource *res = &user_srf->srf.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}
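
/*
 * vmw_surface_define_ioctl - create a user-space surface: copy the per-mip
 * sizes from user space, compute each mip level's offset into the backup
 * buffer and the total backup size from the vmw_sf_bpp table, then register
 * the surface as a ttm base object so it can be looked up by handle.
 */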
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_surface *user_srf;
        struct vmw_surface *srf;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        union drm_vmw_surface_create_arg *arg =
            (union drm_vmw_surface_create_arg *)data;
        struct drm_vmw_surface_create_req *req = &arg->req;
        struct drm_vmw_surface_arg *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct drm_vmw_size __user *user_sizes;
        int ret;
        int i, j;
        uint32_t cur_bo_offset;
        struct drm_vmw_size *cur_size;
        struct vmw_surface_offset *cur_offset;
        uint32_t stride_bpp;
        uint32_t bpp;
        uint32_t num_sizes;
        uint32_t size;
        struct vmw_master *vmaster = vmw_master(file_priv->master);

        if (unlikely(vmw_user_surface_size == 0))
                vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
                        128;

        num_sizes = 0;
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
                num_sizes += req->mip_levels[i];

        if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
            DRM_VMW_MAX_MIP_LEVELS)
                return -EINVAL;

        size = vmw_user_surface_size + 128 +
                ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
                ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   size, false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for surface"
                                  " creation.\n");
                goto out_unlock;
        }

        user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
        if (unlikely(user_srf == NULL)) {
                ret = -ENOMEM;
                goto out_no_user_srf;
        }

        srf = &user_srf->srf;
        res = &srf->res;

        srf->flags = req->flags;
        srf->format = req->format;
        srf->scanout = req->scanout;
        srf->backup = NULL;

        memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
        srf->num_sizes = num_sizes;
        user_srf->size = size;

        srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
        if (unlikely(srf->sizes == NULL)) {
                ret = -ENOMEM;
                goto out_no_sizes;
        }
        srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
                               GFP_KERNEL);
        if (unlikely(srf->offsets == NULL)) {
                ret = -ENOMEM;
                goto out_no_offsets;
        }

        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            req->size_addr;

        ret = copy_from_user(srf->sizes, user_sizes,
                             srf->num_sizes * sizeof(*srf->sizes));
        if (unlikely(ret != 0)) {
                ret = -EFAULT;
                goto out_no_copy;
        }

        cur_bo_offset = 0;
        cur_offset = srf->offsets;
        cur_size = srf->sizes;

        bpp = vmw_sf_bpp[srf->format].bpp;
        stride_bpp = vmw_sf_bpp[srf->format].s_bpp;

        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
                for (j = 0; j < srf->mip_levels[i]; ++j) {
                        uint32_t stride =
                                (cur_size->width * stride_bpp + 7) >> 3;

                        cur_offset->face = i;
                        cur_offset->mip = j;
                        cur_offset->bo_offset = cur_bo_offset;
                        cur_bo_offset += stride * cur_size->height *
                                cur_size->depth * bpp / stride_bpp;
                        ++cur_offset;
                        ++cur_size;
                }
        }
        srf->backup_size = cur_bo_offset;

        if (srf->scanout &&
            srf->num_sizes == 1 &&
            srf->sizes[0].width == 64 &&
            srf->sizes[0].height == 64 &&
            srf->format == SVGA3D_A8R8G8B8) {

                /* allocate image area and clear it */
                srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
                if (!srf->snooper.image) {
                        DRM_ERROR("Failed to allocate cursor_image\n");
                        ret = -ENOMEM;
                        goto out_no_copy;
                }
        } else {
                srf->snooper.image = NULL;
        }
        srf->snooper.crtc = NULL;

        user_srf->base.shareable = false;
        user_srf->base.tfile = NULL;

        /*
         * From this point, the generic resource management functions
         * destroy the object on failure.
         */

        ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(&srf->res);
        ret = ttm_base_object_init(tfile, &user_srf->base,
                                   req->shareable, VMW_RES_SURFACE,
                                   &vmw_user_surface_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                vmw_resource_unreference(&res);
                goto out_unlock;
        }

        rep->sid = user_srf->base.hash.key;
        if (rep->sid == SVGA3D_INVALID_ID)
                DRM_ERROR("Created bad Surface ID.\n");

        vmw_resource_unreference(&res);

        ttm_read_unlock(&vmaster->lock);
        return 0;
out_no_copy:
        kfree(srf->offsets);
out_no_offsets:
        kfree(srf->sizes);
out_no_sizes:
        kfree(user_srf);
out_no_user_srf:
        ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        union drm_vmw_surface_reference_arg *arg =
            (union drm_vmw_surface_reference_arg *)data;
        struct drm_vmw_surface_arg *req = &arg->req;
        struct drm_vmw_surface_create_req *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct drm_vmw_size __user *user_sizes;
        struct ttm_base_object *base;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, req->sid);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Could not find surface to reference.\n");
                return -EINVAL;
        }

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_resource;

        user_srf = container_of(base, struct vmw_user_surface, base);
        srf = &user_srf->srf;

        ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not add a reference to a surface.\n");
                goto out_no_reference;
        }

        rep->flags = srf->flags;
        rep->format = srf->format;
        memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            rep->size_addr;

        if (user_sizes)
                ret = copy_to_user(user_sizes, srf->sizes,
                                   srf->num_sizes * sizeof(*srf->sizes));
        if (unlikely(ret != 0)) {
                DRM_ERROR("copy_to_user failed %p %u\n",
                          user_sizes, srf->num_sizes);
                ret = -EFAULT;
        }
out_bad_resource:
out_no_reference:
        ttm_base_object_unref(&base);

        return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      uint32_t handle, int *id)
{
        struct ttm_base_object *base;
        struct vmw_user_surface *user_srf;

        int ret = -EPERM;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_surface;

        user_srf = container_of(base, struct vmw_user_surface, base);
        *id = user_srf->srf.res.id;
        ret = 0;

out_bad_surface:
        /**
         * FIXME: May deadlock here when called from the
         * command parsing code.
         */

        ttm_base_object_unref(&base);
        return ret;
}

/**
 * Buffer management.
 */
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        kfree(vmw_bo);
}
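
/*
 * vmw_dmabuf_init - initialize a vmw_dma_buffer and hand it over to TTM.
 * @bo_free is the destructor TTM calls when the last bo reference goes away;
 * kernel-internal buffers pass vmw_dmabuf_bo_free while user-visible ones
 * pass vmw_user_dmabuf_destroy.
 */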
int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;

        BUG_ON(!bo_free);

        acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->validate_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, 0, interruptible,
                          NULL, acc_size, bo_free);
        return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

        kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_buffer_object *tmp;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
        if (unlikely(vmw_user_bo == NULL))
                return -ENOMEM;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0)) {
                kfree(vmw_user_bo);
                return ret;
        }

        ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
        ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
                                   &vmw_user_bo->base,
                                   false,
                                   ttm_buffer_type,
                                   &vmw_user_dmabuf_release, NULL);
        if (unlikely(ret != 0))
                goto out_no_base_object;

        rep->handle = vmw_user_bo->base.hash.key;
        rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
        rep->cur_gmr_id = vmw_user_bo->base.hash.key;
        rep->cur_gmr_offset = 0;

out_no_base_object:
        ttm_bo_unref(&tmp);
out_no_dmabuf:
        ttm_read_unlock(&vmaster->lock);

        return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
                                  uint32_t cur_validate_node)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        if (likely(vmw_bo->on_validate_list))
                return vmw_bo->cur_validate_node;

        vmw_bo->cur_validate_node = cur_validate_node;
        vmw_bo->on_validate_list = true;

        return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        vmw_bo->on_validate_list = false;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(base->object_type != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_stream *stream;
        int ret;

        DRM_INFO("%s: unref\n", __func__);
        stream = container_of(res, struct vmw_stream, res);

        ret = vmw_overlay_unref(dev_priv, stream->stream_id);
        WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
                           struct vmw_stream *stream,
                           void (*res_free) (struct vmw_resource *res))
{
        struct vmw_resource *res = &stream->res;
        int ret;

        ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
                                VMW_RES_STREAM, false, res_free, NULL);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(stream);
                else
                        res_free(&stream->res);
                return ret;
        }

        ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
        if (ret) {
                vmw_resource_unreference(&res);
                return ret;
        }

        DRM_INFO("%s: claimed\n", __func__);

        vmw_resource_activate(&stream->res, vmw_stream_destroy);
        return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
        struct vmw_user_stream *stream =
            container_of(res, struct vmw_user_stream, stream.res);
        struct vmw_private *dev_priv = res->dev_priv;

        kfree(stream);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_stream *stream =
            container_of(base, struct vmw_user_stream, base);
        struct vmw_resource *res = &stream->stream.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret = 0;

        res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto out;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EINVAL;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of streams anyway.
         */

        if (unlikely(vmw_user_stream_size == 0))
                vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_stream_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for stream"
                                  " creation.\n");
                goto out_unlock;
        }

        stream = kmalloc(sizeof(*stream), GFP_KERNEL);
        if (unlikely(stream == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_stream_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &stream->stream.res;
        stream->base.shareable = false;
        stream->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
                                   &vmw_user_stream_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->stream_id = res->id;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t *inout_id, struct vmw_resource **out)
{
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        int ret;

        res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto err_ref;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EPERM;
                goto err_ref;
        }

        *inout_id = stream->stream.stream_id;
        *out = res;
        return 0;
err_ref:
        vmw_resource_unreference(&res);
        return ret;
}