/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include <ttm/ttm_placement.h>
#include "svga3d_surfacedefs.h"
/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime: The TTM prime object handling user-space visibility.
 * @srf: The surface metadata.
 * @size: TTM accounting size for the surface.
 * @master: master of the creating client. Used for security check.
 */
struct vmw_user_surface {
        struct ttm_prime_object prime;
        struct vmw_surface srf;
        uint32_t size;
        struct drm_master *master;
};
/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face: Surface face.
 * @mip: Mip level.
 * @bo_offset: Offset into backing store of this mip level.
 */
struct vmw_surface_offset {
        uint32_t face;
        uint32_t mip;
        uint32_t bo_offset;
};
static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);
static const struct vmw_user_resource_conv user_surface_conv = {
        .object_type = VMW_RES_SURFACE,
        .base_obj_to_res = vmw_user_surface_base_to_res,
        .res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
        &user_surface_conv;

static uint64_t vmw_user_surface_size;
static const struct vmw_res_func vmw_legacy_surface_func = {
        .res_type = vmw_res_surface,
        .needs_backup = false,
        .type_name = "legacy surfaces",
        .backup_placement = &vmw_srf_placement,
        .create = &vmw_legacy_srf_create,
        .destroy = &vmw_legacy_srf_destroy,
        .bind = &vmw_legacy_srf_bind,
        .unbind = &vmw_legacy_srf_unbind
};
static const struct vmw_res_func vmw_gb_surface_func = {
        .res_type = vmw_res_surface,
        .needs_backup = true,
        .type_name = "guest backed surfaces",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_surface_create,
        .destroy = vmw_gb_surface_destroy,
        .bind = vmw_gb_surface_bind,
        .unbind = vmw_gb_surface_unbind
};
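
/*
 * The two vmw_res_func tables above select how a surface reaches the device:
 * legacy surfaces (needs_backup = false) are defined/destroyed on the device
 * and have their contents transferred with SVGA3D_CMD_SURFACE_DMA on
 * bind/unbind, while guest-backed surfaces (needs_backup = true) always use
 * a MOB-placed backup buffer and are simply bound to or unbound from it.
 */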
/**
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
        SVGA3dCmdHeader header;
        SVGA3dCmdSurfaceDMA body;
        SVGA3dCopyBox cb;
        SVGA3dCmdSurfaceDMASuffix suffix;
};
/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
        SVGA3dCmdHeader header;
        SVGA3dCmdDefineSurface body;
};
/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
        SVGA3dCmdHeader header;
        SVGA3dCmdDestroySurface body;
};
/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
        return srf->num_sizes * sizeof(struct vmw_surface_dma);
}
/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
        return sizeof(struct vmw_surface_define) + srf->num_sizes *
                sizeof(SVGA3dSize);
}
/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
        return sizeof(struct vmw_surface_destroy);
}
/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
                                       void *cmd_space)
{
        struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
                cmd_space;

        cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.sid = id;
}
/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
                                      void *cmd_space)
{
        struct vmw_surface_define *cmd = (struct vmw_surface_define *)
                cmd_space;
        struct drm_vmw_size *src_size;
        SVGA3dSize *cmd_size;
        uint32_t cmd_len;
        int i;

        cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

        cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
        cmd->header.size = cmd_len;
        cmd->body.sid = srf->res.id;
        cmd->body.surfaceFlags = srf->flags;
        cmd->body.format = cpu_to_le32(srf->format);
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
                cmd->body.face[i].numMipLevels = srf->mip_levels[i];

        /* The array of mip level sizes follows the define command body. */
        cmd += 1;
        cmd_size = (SVGA3dSize *) cmd;
        src_size = srf->sizes;

        for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
                cmd_size->width = src_size->width;
                cmd_size->height = src_size->height;
                cmd_size->depth = src_size->depth;
        }
}
/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
                                   void *cmd_space,
                                   const SVGAGuestPtr *ptr,
                                   bool to_surface)
{
        uint32_t i;
        struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
        const struct svga3d_surface_desc *desc =
                svga3dsurface_get_desc(srf->format);

        for (i = 0; i < srf->num_sizes; ++i) {
                SVGA3dCmdHeader *header = &cmd->header;
                SVGA3dCmdSurfaceDMA *body = &cmd->body;
                SVGA3dCopyBox *cb = &cmd->cb;
                SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
                const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
                const struct drm_vmw_size *cur_size = &srf->sizes[i];

                header->id = SVGA_3D_CMD_SURFACE_DMA;
                header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

                body->guest.ptr = *ptr;
                body->guest.ptr.offset += cur_offset->bo_offset;
                body->guest.pitch = svga3dsurface_calculate_pitch(desc,
                                                                   cur_size);
                body->host.sid = srf->res.id;
                body->host.face = cur_offset->face;
                body->host.mipmap = cur_offset->mip;
                body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
                                  SVGA3D_READ_HOST_VRAM);
                cb->x = 0;
                cb->y = 0;
                cb->z = 0;
                cb->srcx = 0;
                cb->srcy = 0;
                cb->srcz = 0;
                cb->w = cur_size->width;
                cb->h = cur_size->height;
                cb->d = cur_size->depth;

                suffix->suffixSize = sizeof(*suffix);
                suffix->maximumOffset =
                        svga3dsurface_get_image_buffer_size(desc, cur_size,
                                                            body->guest.pitch);
                suffix->flags.discard = 0;
                suffix->flags.unsynchronized = 0;
                suffix->flags.reserved = 0;
                ++cmd;
        }
}
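
/*
 * vmw_surface_dma_encode() emits one struct vmw_surface_dma (header, DMA
 * body, copy box and suffix) per entry in srf->sizes, i.e. per mip level of
 * every face, which is why vmw_surface_dma_size() multiplies by num_sizes.
 * The @to_surface argument selects the transfer direction: write to host
 * VRAM when restoring the surface, read back from host VRAM when backing
 * it up.
 */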
/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_surface *srf;
        void *cmd;

        if (res->func->destroy == vmw_gb_surface_destroy) {
                (void) vmw_gb_surface_destroy(res);
                return;
        }

        if (res->id != -1) {

                cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
                if (unlikely(cmd == NULL)) {
                        DRM_ERROR("Failed reserving FIFO space for surface "
                                  "destruction.\n");
                        return;
                }

                vmw_surface_destroy_encode(res->id, cmd);
                vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

                /*
                 * used_memory_size_atomic, or separate lock
                 * to avoid taking dev_priv::cmdbuf_mutex in
                 * the destroy path.
                 */

                mutex_lock(&dev_priv->cmdbuf_mutex);
                srf = vmw_res_to_srf(res);
                dev_priv->used_memory_size -= res->backup_size;
                mutex_unlock(&dev_priv->cmdbuf_mutex);
        }
        vmw_3d_resource_dec(dev_priv, false);
}
/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, one is allocated and a surface
 * define command is queued.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_surface *srf;
        uint32_t submit_size;
        uint8_t *cmd;
        int ret;

        if (likely(res->id != -1))
                return 0;

        srf = vmw_res_to_srf(res);
        if (unlikely(dev_priv->used_memory_size + res->backup_size >=
                     dev_priv->memory_size))
                return -EBUSY;

        /*
         * Alloc id for the resource.
         */

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a surface id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        /*
         * Encode surface define- commands.
         */

        submit_size = vmw_surface_define_size(srf);
        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        vmw_surface_define_encode(srf, cmd);
        vmw_fifo_commit(dev_priv, submit_size);

        /*
         * Surface memory usage accounting.
         */

        dev_priv->used_memory_size += res->backup_size;
        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}
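
/*
 * Legacy surface accounting: vmw_legacy_srf_create() adds res->backup_size
 * to dev_priv->used_memory_size once the define command has been committed,
 * and vmw_legacy_srf_destroy() / vmw_hw_surface_destroy() subtract it again,
 * so the comparison against dev_priv->memory_size above bounds the total
 * amount of surface memory requested from the device.
 */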
/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 * @bind: Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
                              struct ttm_validate_buffer *val_buf,
                              bool bind)
{
        SVGAGuestPtr ptr;
        struct vmw_fence_obj *fence;
        uint32_t submit_size;
        struct vmw_surface *srf = vmw_res_to_srf(res);
        uint8_t *cmd;
        struct vmw_private *dev_priv = res->dev_priv;

        BUG_ON(val_buf->bo == NULL);

        submit_size = vmw_surface_dma_size(srf);
        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "DMA.\n");
                return -ENOMEM;
        }
        vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
        vmw_surface_dma_encode(srf, cmd, &ptr, bind);

        vmw_fifo_commit(dev_priv, submit_size);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(val_buf->bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}
/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 * surface validation process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        if (!res->backup_dirty)
                return 0;

        return vmw_legacy_srf_dma(res, val_buf, true);
}
/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 * surface eviction process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        if (unlikely(readback))
                return vmw_legacy_srf_dma(res, val_buf, false);
        return 0;
}
/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 * resource eviction process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(res->id == -1);

        /*
         * Encode the dma- and surface destroy commands.
         */

        submit_size = vmw_surface_destroy_size();
        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "eviction.\n");
                return -ENOMEM;
        }

        vmw_surface_destroy_encode(res->id, cmd);
        vmw_fifo_commit(dev_priv, submit_size);

        /*
         * Surface memory usage accounting.
         */

        dev_priv->used_memory_size -= res->backup_size;

        /*
         * Release the surface ID.
         */

        vmw_resource_release_id(res);

        return 0;
}
/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to the struct vmw_surface to initialize.
 * @res_free: Pointer to a resource destructor used to free
 * the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
                            struct vmw_surface *srf,
                            void (*res_free) (struct vmw_resource *res))
{
        int ret;
        struct vmw_resource *res = &srf->res;

        BUG_ON(res_free == NULL);
        if (!dev_priv->has_mob)
                (void) vmw_3d_resource_inc(dev_priv, false);
        ret = vmw_resource_init(dev_priv, res, true, res_free,
                                (dev_priv->has_mob) ? &vmw_gb_surface_func :
                                &vmw_legacy_surface_func);

        if (unlikely(ret != 0)) {
                if (!dev_priv->has_mob)
                        vmw_3d_resource_dec(dev_priv, false);
                res_free(res);
                return ret;
        }

        /*
         * The surface won't be visible to hardware until a
         * surface validate.
         */

        vmw_resource_activate(res, vmw_hw_surface_destroy);
        return ret;
}
/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 * user visible surfaces
 *
 * @base: Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_surface,
                              prime.base)->srf.res);
}
/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res: A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
        struct vmw_surface *srf = vmw_res_to_srf(res);
        struct vmw_user_surface *user_srf =
            container_of(srf, struct vmw_user_surface, srf);
        struct vmw_private *dev_priv = srf->res.dev_priv;
        uint32_t size = user_srf->size;

        if (user_srf->master)
                drm_master_put(&user_srf->master);
        kfree(srf->offsets);
        kfree(srf->sizes);
        kfree(srf->snooper.image);
        ttm_prime_object_kfree(user_srf, prime);
        ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 * destructor
 *
 * @p_base: Pointer to a pointer to a TTM base object
 * embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_surface *user_srf =
            container_of(base, struct vmw_user_surface, prime.base);
        struct vmw_resource *res = &user_srf->srf.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}
/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 * the user surface destroy functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}
/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_surface *user_srf;
        struct vmw_surface *srf;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        union drm_vmw_surface_create_arg *arg =
            (union drm_vmw_surface_create_arg *)data;
        struct drm_vmw_surface_create_req *req = &arg->req;
        struct drm_vmw_surface_arg *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct drm_vmw_size __user *user_sizes;
        int ret;
        int i, j;
        uint32_t cur_bo_offset;
        struct drm_vmw_size *cur_size;
        struct vmw_surface_offset *cur_offset;
        uint32_t num_sizes;
        uint32_t size;
        const struct svga3d_surface_desc *desc;

        if (unlikely(vmw_user_surface_size == 0))
                vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
                        128;

        num_sizes = 0;
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
                num_sizes += req->mip_levels[i];

        if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
            DRM_VMW_MAX_MIP_LEVELS)
                return -EINVAL;

        size = vmw_user_surface_size + 128 +
                ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
                ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

        desc = svga3dsurface_get_desc(req->format);
        if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
                DRM_ERROR("Invalid surface format for surface creation.\n");
                return -EINVAL;
        }

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   size, false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for surface"
                                  " creation.\n");
                goto out_unlock;
        }

        user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
        if (unlikely(user_srf == NULL)) {
                ret = -ENOMEM;
                goto out_no_user_srf;
        }

        srf = &user_srf->srf;
        res = &srf->res;

        srf->flags = req->flags;
        srf->format = req->format;
        srf->scanout = req->scanout;

        memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
        srf->num_sizes = num_sizes;
        user_srf->size = size;

        srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
        if (unlikely(srf->sizes == NULL)) {
                ret = -ENOMEM;
                goto out_no_sizes;
        }
        srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
                               GFP_KERNEL);
        if (unlikely(srf->offsets == NULL)) {
                ret = -ENOMEM;
                goto out_no_offsets;
        }

        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            req->size_addr;

        ret = copy_from_user(srf->sizes, user_sizes,
                             srf->num_sizes * sizeof(*srf->sizes));
        if (unlikely(ret != 0)) {
                ret = -EFAULT;
                goto out_no_copy;
        }

        srf->base_size = *srf->sizes;
        srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
        srf->multisample_count = 0;

        cur_bo_offset = 0;
        cur_offset = srf->offsets;
        cur_size = srf->sizes;

        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
                for (j = 0; j < srf->mip_levels[i]; ++j) {
                        uint32_t stride = svga3dsurface_calculate_pitch
                                (desc, cur_size);

                        cur_offset->face = i;
                        cur_offset->mip = j;
                        cur_offset->bo_offset = cur_bo_offset;
                        cur_bo_offset += svga3dsurface_get_image_buffer_size
                                (desc, cur_size, stride);
                        ++cur_offset;
                        ++cur_size;
                }
        }
        res->backup_size = cur_bo_offset;
        if (srf->scanout &&
            srf->num_sizes == 1 &&
            srf->sizes[0].width == 64 &&
            srf->sizes[0].height == 64 &&
            srf->format == SVGA3D_A8R8G8B8) {

                srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
                /* clear the image */
                if (srf->snooper.image) {
                        memset(srf->snooper.image, 0x00, 64 * 64 * 4);
                } else {
                        DRM_ERROR("Failed to allocate cursor_image\n");
                        ret = -ENOMEM;
                        goto out_no_copy;
                }
        } else {
                srf->snooper.image = NULL;
        }
        srf->snooper.crtc = NULL;

        user_srf->prime.base.shareable = false;
        user_srf->prime.base.tfile = NULL;
        if (drm_is_primary_client(file_priv))
                user_srf->master = drm_master_get(file_priv->master);

        /*
         * From this point, the generic resource management functions
         * destroy the object on failure.
         */

        ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        /*
         * A gb-aware client referencing a shared surface will
         * expect a backup buffer to be present.
         */
        if (dev_priv->has_mob && req->shareable) {
                uint32_t backup_handle;

                ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
                                            res->backup_size,
                                            true,
                                            &backup_handle,
                                            &res->backup);
                if (unlikely(ret != 0)) {
                        vmw_resource_unreference(&res);
                        goto out_unlock;
                }
        }

        tmp = vmw_resource_reference(&srf->res);
        ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
                                    req->shareable, VMW_RES_SURFACE,
                                    &vmw_user_surface_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                vmw_resource_unreference(&res);
                goto out_unlock;
        }

        rep->sid = user_srf->prime.base.hash.key;
        vmw_resource_unreference(&res);

        ttm_read_unlock(&dev_priv->reservation_sem);
        return 0;
out_no_copy:
        kfree(srf->offsets);
out_no_offsets:
        kfree(srf->sizes);
out_no_sizes:
        ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
        ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
                             struct drm_file *file_priv,
                             uint32_t u_handle,
                             enum drm_vmw_handle_type handle_type,
                             struct ttm_base_object **base_p)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_user_surface *user_srf;
        uint32_t handle;
        struct ttm_base_object *base;
        int ret;

        if (handle_type == DRM_VMW_HANDLE_PRIME) {
                ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
                if (unlikely(ret != 0))
                        return ret;
        } else {
                if (unlikely(drm_is_render_client(file_priv))) {
                        DRM_ERROR("Render client refused legacy "
                                  "surface reference.\n");
                        return -EACCES;
                }
                handle = u_handle;
        }

        ret = -EINVAL;
        base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Could not find surface to reference.\n");
                goto out_no_lookup;
        }

        if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
                DRM_ERROR("Referenced object is not a surface.\n");
                goto out_bad_resource;
        }

        if (handle_type != DRM_VMW_HANDLE_PRIME) {
                user_srf = container_of(base, struct vmw_user_surface,
                                        prime.base);

                /*
                 * Make sure the surface creator has the same
                 * authenticating master.
                 */
                if (drm_is_primary_client(file_priv) &&
                    user_srf->master != file_priv->master) {
                        DRM_ERROR("Trying to reference surface outside of"
                                  " master domain.\n");
                        ret = -EACCES;
                        goto out_bad_resource;
                }

                ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not add a reference to a surface.\n");
                        goto out_bad_resource;
                }
        }

        *base_p = base;
        return 0;

out_bad_resource:
        ttm_base_object_unref(&base);
out_no_lookup:
        if (handle_type == DRM_VMW_HANDLE_PRIME)
                (void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);

        return ret;
}
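
/*
 * vmw_surface_handle_reference() centralizes the access checks for surface
 * lookups: prime handles are first converted to base object handles, render
 * clients are refused legacy (non-prime) references, and primary clients
 * must share the authenticating master with the surface creator before a
 * TTM_REF_USAGE reference is added.
 */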
/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_surface_reference_arg *arg =
            (union drm_vmw_surface_reference_arg *)data;
        struct drm_vmw_surface_arg *req = &arg->req;
        struct drm_vmw_surface_create_req *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct drm_vmw_size __user *user_sizes;
        struct ttm_base_object *base;
        int ret;

        ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
                                           req->handle_type, &base);
        if (unlikely(ret != 0))
                return ret;

        user_srf = container_of(base, struct vmw_user_surface, prime.base);
        srf = &user_srf->srf;

        rep->flags = srf->flags;
        rep->format = srf->format;
        memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            rep->size_addr;

        ret = copy_to_user(user_sizes, &srf->base_size,
                           sizeof(srf->base_size));
        if (unlikely(ret != 0)) {
                DRM_ERROR("copy_to_user failed %p %u\n",
                          user_sizes, srf->num_sizes);
                ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
                ret = -EFAULT;
        }

        ttm_base_object_unref(&base);

        return ret;
}
/**
 * vmw_gb_surface_create - Create a guest-backed device surface as part of
 * the resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_surface *srf = vmw_res_to_srf(res);
        uint32_t cmd_len, submit_len;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBSurface body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        (void) vmw_3d_resource_inc(dev_priv, false);
        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a surface id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd_len = sizeof(cmd->body);
        submit_len = sizeof(*cmd);
        cmd = vmw_fifo_reserve(dev_priv, submit_len);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
        cmd->header.size = cmd_len;
        cmd->body.sid = srf->res.id;
        cmd->body.surfaceFlags = srf->flags;
        cmd->body.format = cpu_to_le32(srf->format);
        cmd->body.numMipLevels = srf->mip_levels[0];
        cmd->body.multisampleCount = srf->multisample_count;
        cmd->body.autogenFilter = srf->autogen_filter;
        cmd->body.size.width = srf->base_size.width;
        cmd->body.size.height = srf->base_size.height;
        cmd->body.size.depth = srf->base_size.depth;
        vmw_fifo_commit(dev_priv, submit_len);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        vmw_3d_resource_dec(dev_priv, false);
        return ret;
}
static int vmw_gb_surface_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBSurface body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdUpdateGBSurface body;
        } *cmd2;
        uint32_t submit_size;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);

        cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd1 == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "binding.\n");
                return -ENOMEM;
        }

        cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
        cmd1->header.size = sizeof(cmd1->body);
        cmd1->body.sid = res->id;
        cmd1->body.mobid = bo->mem.start;
        if (res->backup_dirty) {
                cmd2 = (void *) &cmd1[1];
                cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
                cmd2->header.size = sizeof(cmd2->body);
                cmd2->body.sid = res->id;
                res->backup_dirty = false;
        }
        vmw_fifo_commit(dev_priv, submit_size);

        return 0;
}
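
/*
 * When the backup buffer is dirty, vmw_gb_surface_bind() appends an
 * SVGA_3D_CMD_UPDATE_GB_SURFACE command right after the bind command in the
 * same FIFO reservation, so the device refreshes the surface from the newly
 * bound MOB as part of the same submission.
 */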
static int vmw_gb_surface_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBSurface body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdInvalidateGBSurface body;
        } *cmd2;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBSurface body;
        } *cmd3;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "unbinding.\n");
                return -ENOMEM;
        }

        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.sid = res->id;
                cmd3 = (void *) &cmd1[1];
        } else {
                cmd2 = (void *) cmd;
                cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
                cmd2->header.size = sizeof(cmd2->body);
                cmd2->body.sid = res->id;
                cmd3 = (void *) &cmd2[1];
        }

        cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
        cmd3->header.size = sizeof(cmd3->body);
        cmd3->body.sid = res->id;
        cmd3->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(val_buf->bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}
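
/*
 * vmw_gb_surface_unbind() either reads the surface contents back into the
 * MOB (readback) or invalidates them, then detaches the surface by binding
 * it to SVGA3D_INVALID_ID, and finally fences the backup buffer so it is
 * not reused before the device has finished the transfer.
 */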
static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBSurface body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        mutex_lock(&dev_priv->binding_mutex);
        vmw_context_binding_res_list_scrub(&res->binding_head);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "destruction.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.sid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        mutex_unlock(&dev_priv->binding_mutex);
        vmw_resource_release_id(res);
        vmw_3d_resource_dec(dev_priv, false);

        return 0;
}
/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_surface *user_srf;
        struct vmw_surface *srf;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        union drm_vmw_gb_surface_create_arg *arg =
            (union drm_vmw_gb_surface_create_arg *)data;
        struct drm_vmw_gb_surface_create_req *req = &arg->req;
        struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
        uint32_t size;
        const struct svga3d_surface_desc *desc;
        uint32_t backup_handle;

        if (unlikely(vmw_user_surface_size == 0))
                vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
                        128;

        size = vmw_user_surface_size + 128;

        desc = svga3dsurface_get_desc(req->format);
        if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
                DRM_ERROR("Invalid surface format for surface creation.\n");
                return -EINVAL;
        }

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   size, false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for surface"
                                  " creation.\n");
                goto out_unlock;
        }

        user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
        if (unlikely(user_srf == NULL)) {
                ret = -ENOMEM;
                goto out_no_user_srf;
        }

        srf = &user_srf->srf;
        res = &srf->res;

        srf->flags = req->svga3d_flags;
        srf->format = req->format;
        srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
        srf->mip_levels[0] = req->mip_levels;
        srf->num_sizes = 1;
        srf->sizes = NULL;
        srf->offsets = NULL;
        user_srf->size = size;
        srf->base_size = req->base_size;
        srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
        srf->multisample_count = req->multisample_count;
        res->backup_size = svga3dsurface_get_serialized_size
                (srf->format, srf->base_size, srf->mip_levels[0],
                 srf->flags & SVGA3D_SURFACE_CUBEMAP);

        user_srf->prime.base.shareable = false;
        user_srf->prime.base.tfile = NULL;
        if (drm_is_primary_client(file_priv))
                user_srf->master = drm_master_get(file_priv->master);

        /*
         * From this point, the generic resource management functions
         * destroy the object on failure.
         */

        ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        if (req->buffer_handle != SVGA3D_INVALID_ID) {
                ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
                                             &res->backup);
        } else if (req->drm_surface_flags &
                   drm_vmw_surface_flag_create_buffer)
                ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
                                            res->backup_size,
                                            req->drm_surface_flags &
                                            drm_vmw_surface_flag_shareable,
                                            &backup_handle,
                                            &res->backup);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&res);
                goto out_unlock;
        }

        tmp = vmw_resource_reference(&srf->res);
        ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
                                    req->drm_surface_flags &
                                    drm_vmw_surface_flag_shareable,
                                    VMW_RES_SURFACE,
                                    &vmw_user_surface_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                vmw_resource_unreference(&res);
                goto out_unlock;
        }

        rep->handle = user_srf->prime.base.hash.key;
        rep->backup_size = res->backup_size;
        if (res->backup) {
                rep->buffer_map_handle =
                        drm_vma_node_offset_addr(&res->backup->base.vma_node);
                rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
                rep->buffer_handle = backup_handle;
        } else {
                rep->buffer_map_handle = 0;
                rep->buffer_size = 0;
                rep->buffer_handle = SVGA3D_INVALID_ID;
        }

        vmw_resource_unreference(&res);

        ttm_read_unlock(&dev_priv->reservation_sem);
        return 0;
out_no_user_srf:
        ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}
/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_gb_surface_reference_arg *arg =
            (union drm_vmw_gb_surface_reference_arg *)data;
        struct drm_vmw_surface_arg *req = &arg->req;
        struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct ttm_base_object *base;
        uint32_t backup_handle;
        int ret;

        ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
                                           req->handle_type, &base);
        if (unlikely(ret != 0))
                return ret;

        user_srf = container_of(base, struct vmw_user_surface, prime.base);
        srf = &user_srf->srf;
        if (srf->res.backup == NULL) {
                DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
                ret = -EINVAL;
                goto out_bad_resource;
        }

        mutex_lock(&dev_priv->cmdbuf_mutex);    /* Protect res->backup */
        ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
                                        &backup_handle);
        mutex_unlock(&dev_priv->cmdbuf_mutex);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not add a reference to a GB surface "
                          "backup buffer.\n");
                (void) ttm_ref_object_base_unref(tfile, base->hash.key,
                                                 TTM_REF_USAGE);
                goto out_bad_resource;
        }

        rep->creq.svga3d_flags = srf->flags;
        rep->creq.format = srf->format;
        rep->creq.mip_levels = srf->mip_levels[0];
        rep->creq.drm_surface_flags = 0;
        rep->creq.multisample_count = srf->multisample_count;
        rep->creq.autogen_filter = srf->autogen_filter;
        rep->creq.buffer_handle = backup_handle;
        rep->creq.base_size = srf->base_size;
        rep->crep.handle = user_srf->prime.base.hash.key;
        rep->crep.backup_size = srf->res.backup_size;
        rep->crep.buffer_handle = backup_handle;
        rep->crep.buffer_map_handle =
                drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
        rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}