// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "device_include/svga3d_surfacedefs.h"
#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
	(svga3d_flags & ((uint64_t)U32_MAX))
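
/*
 * A quick sketch (arbitrary example values) of how the macros above
 * round-trip the 64-bit flag word the driver stores internally through the
 * two 32-bit halves carried by the ext ioctls:
 *
 *	uint64_t flags = SVGA3D_FLAGS_64(0x00000001, 0x00000002);
 *
 * yields flags == 0x0000000100000002ULL, with
 * SVGA3D_FLAGS_UPPER_32(flags) == 0x00000001 and
 * SVGA3D_FLAGS_LOWER_32(flags) == 0x00000002.
 */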
/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime:          The TTM prime object handling user-space visibility.
 * @srf:            The surface metadata.
 * @size:           TTM accounting size for the surface.
 * @master:         Master of the creating client. Used for security check.
 * @backup_base:    The TTM base object of the backup buffer.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	uint32_t size;
	struct drm_master *master;
	struct ttm_base_object *backup_base;
};
/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:           Surface face.
 * @mip:            Mip level.
 * @bo_offset:      Offset into backing store of this mip level.
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};
/**
 * struct vmw_surface_dirty - Surface dirty-tracker
 * @cache: Cached layout information of the surface.
 * @size: Accounting size for the struct vmw_surface_dirty.
 * @num_subres: Number of subresources.
 * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
 */
struct vmw_surface_dirty {
	struct svga3dsurface_cache cache;
	size_t size;
	u32 num_subres;
	SVGA3dBox boxes[0];
};
static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
			       struct drm_vmw_gb_surface_create_ext_req *req,
			       struct drm_vmw_gb_surface_create_rep *rep,
			       struct drm_file *file_priv);
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
				  struct drm_vmw_surface_arg *req,
				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
				  struct drm_file *file_priv);
static void vmw_surface_dirty_free(struct vmw_resource *res);
static int vmw_surface_dirty_alloc(struct vmw_resource *res);
static int vmw_surface_dirty_sync(struct vmw_resource *res);
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
					size_t end);
static int vmw_surface_clean(struct vmw_resource *res);
static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;

static uint64_t vmw_user_surface_size;
static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.prio = 1,
	.dirty_prio = 1,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};
static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,
	.may_evict = true,
	.prio = 1,
	.dirty_prio = 2,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind,
	.dirty_alloc = vmw_surface_dirty_alloc,
	.dirty_free = vmw_surface_dirty_free,
	.dirty_sync = vmw_surface_dirty_sync,
	.dirty_range_add = vmw_surface_dirty_range_add,
	.clean = vmw_surface_clean,
};
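
/*
 * The two vmw_res_func tables above are the only difference between the
 * legacy and guest-backed code paths at the resource-manager level:
 * vmw_surface_init() below picks one based on dev_priv->has_mob, and the
 * generic resource code then dispatches create/destroy/bind/unbind (and,
 * for guest-backed surfaces, the dirty-tracking callbacks) through the
 * chosen table.
 */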
/**
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};
/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};
/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}
/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}
/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}
/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}
/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	/*
	 * Downcast of surfaceFlags, was upcasted when received from user-space,
	 * since driver internally stores as 64 bit.
	 * For legacy surface define only 32 bit flag is supported.
	 */
	cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->flags;
	cmd->body.format = srf->format;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}
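
/*
 * FIFO layout produced by vmw_surface_define_encode() for a surface with
 * N mip sizes, which is also why vmw_surface_define_size() adds
 * srf->num_sizes * sizeof(SVGA3dSize) to the command size:
 *
 *	SVGA3dCmdHeader | SVGA3dCmdDefineSurface | SVGA3dSize[0] .. SVGA3dSize[N-1]
 */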
/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(srf->format);

	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
								  cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			svga3dsurface_get_image_buffer_size(desc, cur_size,
							    body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
};
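
/*
 * Note that vmw_surface_dma_encode() emits one complete
 * header/body/copybox/suffix quadruple per mip image, so the total FIFO
 * space consumed is srf->num_sizes * sizeof(struct vmw_surface_dma),
 * matching vmw_surface_dma_size() above.
 */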
/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res:        Pointer to a struct vmw_resource embedded in a struct
 *              vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	void *cmd;

	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {

		cmd = VMW_FIFO_RESERVE(dev_priv, vmw_surface_destroy_size());
		if (unlikely(!cmd))
			return;

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * used_memory_size_atomic, or separate lock
		 * to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
}
/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id.
 *
 * Returns -EBUSY if there wasn't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define- commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);
	vmw_fifo_resource_inc(dev_priv);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 * @bind:           Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(!val_buf->bo);
	submit_size = vmw_surface_dma_size(srf);
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 * surface validation process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->backup_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}
/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 * surface eviction process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @readback:       Whether surface data should be read back to the
 *                  backup buffer.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}
/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 * resource eviction process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the dma- and surface destroy commands.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}
/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv:       Pointer to a device private struct.
 * @srf:            Pointer to the struct vmw_surface to initialize.
 * @res_free:       Pointer to a resource destructor used to free
 *                  the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(!res_free);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	INIT_LIST_HEAD(&srf->view_list);
	res->hw_destroy = vmw_hw_surface_destroy;
	return ret;
}
/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 * user visible surfaces
 *
 * @base:           Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}
/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	WARN_ON_ONCE(res->dirty);
	if (user_srf->master)
		drm_master_put(&user_srf->master);
	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	ttm_prime_object_kfree(user_srf, prime);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 * destructor
 *
 * @p_base:         Pointer to a pointer to a TTM base object
 *                  embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	if (user_srf->backup_base)
		ttm_base_object_unref(&user_srf->backup_base);
	vmw_resource_unreference(&res);
}
/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 * the user surface destroy functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}
/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	uint32_t size;
	const struct svga3d_surface_desc *desc;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
			return -EINVAL;
		num_sizes += req->mip_levels[i];
	}

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
	    num_sizes == 0)
		return -EINVAL;

	size = vmw_user_surface_size +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
			       req->format);
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, &ctx);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(!user_srf)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	/* Driver internally stores as 64-bit flags */
	srf->flags = (SVGA3dSurfaceAllFlags)req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;
	srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
				 req->size_addr,
				 sizeof(*srf->sizes) * srf->num_sizes);
	if (IS_ERR(srf->sizes)) {
		ret = PTR_ERR(srf->sizes);
		goto out_no_sizes;
	}
	srf->offsets = kmalloc_array(srf->num_sizes,
				     sizeof(*srf->offsets),
				     GFP_KERNEL);
	if (unlikely(!srf->offsets)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	srf->base_size = *srf->sizes;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = 0;
	srf->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	srf->quality_level = SVGA3D_MS_QUALITY_NONE;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride = svga3dsurface_calculate_pitch
				(desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += svga3dsurface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
	res->backup_size = cur_bo_offset;
	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (!srf->snooper.image) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}

	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_master_get(file_priv->master);

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	/*
	 * A gb-aware client referencing a shared surface will
	 * expect a backup buffer to be present.
	 */
	if (dev_priv->has_mob && req->shareable) {
		uint32_t backup_handle;

		ret = vmw_user_bo_alloc(dev_priv, tfile,
					res->backup_size,
					true,
					&backup_handle,
					&res->backup,
					&user_srf->backup_base);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
	}

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->shareable, VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->prime.base.handle;
	vmw_resource_unreference(&res);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_no_sizes:
	ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
			     struct drm_file *file_priv,
			     uint32_t u_handle,
			     enum drm_vmw_handle_type handle_type,
			     struct ttm_base_object **base_p)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_surface *user_srf;
	uint32_t handle;
	struct ttm_base_object *base;
	int ret;

	if (handle_type == DRM_VMW_HANDLE_PRIME) {
		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
		if (unlikely(ret != 0))
			return ret;
	} else {
		handle = u_handle;
	}

	ret = -EINVAL;
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
	if (unlikely(!base)) {
		VMW_DEBUG_USER("Could not find surface to reference.\n");
		goto out_no_lookup;
	}

	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
		VMW_DEBUG_USER("Referenced object is not a surface.\n");
		goto out_bad_resource;
	}

	if (handle_type != DRM_VMW_HANDLE_PRIME) {
		bool require_exist = false;

		user_srf = container_of(base, struct vmw_user_surface,
					prime.base);

		/* Error out if we are unauthenticated primary */
		if (drm_is_primary_client(file_priv) &&
		    !file_priv->authenticated) {
			ret = -EACCES;
			goto out_bad_resource;
		}

		/*
		 * Make sure the surface creator has the same
		 * authenticating master, or is already registered with us.
		 */
		if (drm_is_primary_client(file_priv) &&
		    user_srf->master != file_priv->master)
			require_exist = true;

		if (unlikely(drm_is_render_client(file_priv)))
			require_exist = true;

		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
					 require_exist);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not add a reference to a surface.\n");
			goto out_bad_resource;
		}
	}

	*base_p = base;
	return 0;

out_bad_resource:
	ttm_base_object_unref(&base);
out_no_lookup:
	if (handle_type == DRM_VMW_HANDLE_PRIME)
		(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);

	return ret;
}
/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	/* Downcast of flags when sending back to user space */
	rep->flags = (uint32_t)srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
			rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->base_size,
				   sizeof(srf->base_size));
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
			       srf->num_sizes);
		ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
		ret = -EFAULT;
	}

	ttm_base_object_unref(&base);

	return ret;
}
/**
 * vmw_gb_surface_create - Encode a surface define command and submit it to
 * the device, as part of the resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint32_t cmd_len, cmd_id, submit_len;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface body;
	} *cmd;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v2 body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v3 body;
	} *cmd3;

	if (likely(res->id != -1))
		return 0;

	vmw_fifo_resource_inc(dev_priv);
	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	if (dev_priv->has_sm4_1 && srf->array_size > 0) {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
		cmd_len = sizeof(cmd3->body);
		submit_len = sizeof(*cmd3);
	} else if (srf->array_size > 0) {
		/* has_dx checked on creation time. */
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
		cmd_len = sizeof(cmd2->body);
		submit_len = sizeof(*cmd2);
	} else {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
		cmd_len = sizeof(cmd->body);
		submit_len = sizeof(*cmd);
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, submit_len);
	cmd2 = (typeof(cmd2))cmd;
	cmd3 = (typeof(cmd3))cmd;
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	if (dev_priv->has_sm4_1 && srf->array_size > 0) {
		cmd3->header.id = cmd_id;
		cmd3->header.size = cmd_len;
		cmd3->body.sid = srf->res.id;
		cmd3->body.surfaceFlags = srf->flags;
		cmd3->body.format = srf->format;
		cmd3->body.numMipLevels = srf->mip_levels[0];
		cmd3->body.multisampleCount = srf->multisample_count;
		cmd3->body.multisamplePattern = srf->multisample_pattern;
		cmd3->body.qualityLevel = srf->quality_level;
		cmd3->body.autogenFilter = srf->autogen_filter;
		cmd3->body.size.width = srf->base_size.width;
		cmd3->body.size.height = srf->base_size.height;
		cmd3->body.size.depth = srf->base_size.depth;
		cmd3->body.arraySize = srf->array_size;
	} else if (srf->array_size > 0) {
		cmd2->header.id = cmd_id;
		cmd2->header.size = cmd_len;
		cmd2->body.sid = srf->res.id;
		cmd2->body.surfaceFlags = srf->flags;
		cmd2->body.format = srf->format;
		cmd2->body.numMipLevels = srf->mip_levels[0];
		cmd2->body.multisampleCount = srf->multisample_count;
		cmd2->body.autogenFilter = srf->autogen_filter;
		cmd2->body.size.width = srf->base_size.width;
		cmd2->body.size.height = srf->base_size.height;
		cmd2->body.size.depth = srf->base_size.depth;
		cmd2->body.arraySize = srf->array_size;
	} else {
		cmd->header.id = cmd_id;
		cmd->header.size = cmd_len;
		cmd->body.sid = srf->res.id;
		cmd->body.surfaceFlags = srf->flags;
		cmd->body.format = srf->format;
		cmd->body.numMipLevels = srf->mip_levels[0];
		cmd->body.multisampleCount = srf->multisample_count;
		cmd->body.autogenFilter = srf->autogen_filter;
		cmd->body.size.width = srf->base_size.width;
		cmd->body.size.height = srf->base_size.height;
		cmd->body.size.depth = srf->base_size.depth;
	}

	vmw_fifo_commit(dev_priv, submit_len);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	vmw_fifo_resource_dec(dev_priv);
	return ret;
}
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd2;
	uint32_t submit_size;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);

	cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd1))
		return -ENOMEM;

	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.sid = res->id;
	cmd1->body.mobid = bo->mem.start;
	if (res->backup_dirty) {
		cmd2 = (void *) &cmd1[1];
		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
	}
	vmw_fifo_commit(dev_priv, submit_size);

	if (res->backup->dirty && res->backup_dirty) {
		/* We've just made a full upload. Clear dirty regions. */
		vmw_bo_dirty_clear_res(res);
	}

	res->backup_dirty = false;

	return 0;
}
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd3;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.sid = res->id;
		cmd3 = (void *) &cmd1[1];
	} else {
		cmd2 = (void *) cmd;
		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		cmd3 = (void *) &cmd2[1];
	}

	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd3->header.size = sizeof(cmd3->body);
	cmd3->body.sid = res->id;
	cmd3->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBSurface body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
	vmw_binding_res_list_scrub(&res->binding_head);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(!cmd)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}
/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_arg *arg =
	    (union drm_vmw_gb_surface_create_arg *)data;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_create_ext_req req_ext;

	req_ext.base = arg->req;
	req_ext.version = drm_vmw_gb_surface_v1;
	req_ext.svga3d_flags_upper_32_bits = 0;
	req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
	req_ext.must_be_zero = 0;

	return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
}
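
/*
 * A sketch of what the wrapper above does for the legacy (v0) request: it
 * widens it into a struct drm_vmw_gb_surface_create_ext_req by zero- or
 * NONE-filling every field the older ioctl cannot express, so that
 * vmw_gb_surface_define_internal() only ever sees the extended layout.
 */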
/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_arg *arg =
	    (union drm_vmw_gb_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
	int ret;

	ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);

	if (unlikely(ret != 0))
		return ret;

	rep->creq = rep_ext.creq.base;
	rep->crep = rep_ext.crep;

	return ret;
}
/**
 * vmw_surface_gb_priv_define - Define a private GB surface
 *
 * @dev:  Pointer to a struct drm_device
 * @user_accounting_size:  Used to track user-space memory usage, set
 *                         to 0 for kernel mode only memory
 * @svga3d_flags: SVGA3d surface flags for the device
 * @format: requested surface format
 * @for_scanout: true if intended to be used for scanout buffer
 * @num_mip_levels:  number of MIP levels
 * @multisample_count: Surface multisample count
 * @array_size: Surface array size.
 * @size: width, height, depth of the surface requested
 * @multisample_pattern: Multisampling pattern when msaa is supported
 * @quality_level: Precision settings
 * @srf_out: allocated user_srf. Set to NULL on failure.
 *
 * GB surfaces allocated by this function will not have a user mode handle, and
 * thus will only be visible to vmwgfx.  For optimization reasons the
 * surface may later be given a user mode handle by another function to make
 * it available to user mode drivers.
 */
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       SVGA3dSurfaceAllFlags svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       SVGA3dMSPattern multisample_pattern,
			       SVGA3dMSQualityLevel quality_level,
			       struct vmw_surface **srf_out)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_surface *srf;
	int ret;
	u32 num_layers = 1;
	u32 sample_count = 1;

	*srf_out = NULL;

	if (for_scanout) {
		if (!svga3dsurface_is_screen_target_format(format)) {
			VMW_DEBUG_USER("Invalid Screen Target surface format.");
			return -EINVAL;
		}

		if (size.width > dev_priv->texture_max_width ||
		    size.height > dev_priv->texture_max_height) {
			VMW_DEBUG_USER("%ux%u, exceeds max surface size %ux%u\n",
				       size.width, size.height,
				       dev_priv->texture_max_width,
				       dev_priv->texture_max_height);
			return -EINVAL;
		}
	} else {
		const struct svga3d_surface_desc *desc;

		desc = svga3dsurface_get_desc(format);
		if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
			VMW_DEBUG_USER("Invalid surface format.\n");
			return -EINVAL;
		}
	}

	/* array_size must be null for non-GL3 host. */
	if (array_size > 0 && !dev_priv->has_dx) {
		VMW_DEBUG_USER("Tried to create DX surface on non-DX host.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   user_accounting_size, &ctx);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(!user_srf)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	*srf_out  = &user_srf->srf;
	user_srf->size = user_accounting_size;
	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;

	srf = &user_srf->srf;
	srf->flags = svga3d_flags;
	srf->format = format;
	srf->scanout = for_scanout;
	srf->mip_levels[0] = num_mip_levels;
	srf->num_sizes = 1;
	srf->sizes = NULL;
	srf->offsets = NULL;
	srf->base_size = size;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->array_size = array_size;
	srf->multisample_count = multisample_count;
	srf->multisample_pattern = multisample_pattern;
	srf->quality_level = quality_level;

	if (array_size)
		num_layers = array_size;
	else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers = SVGA3D_MAX_SURFACE_FACES;

	if (srf->flags & SVGA3D_SURFACE_MULTISAMPLE)
		sample_count = srf->multisample_count;

	srf->res.backup_size =
		svga3dsurface_get_serialized_size_extended(srf->format,
							   srf->base_size,
							   srf->mip_levels[0],
							   num_layers,
							   sample_count);

	if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
		srf->res.backup_size += sizeof(SVGA3dDXSOState);

	/*
	 * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
	 * size greater than STDU max width/height. This is really a workaround
	 * to support creation of big framebuffer requested by some user-space
	 * for whole topology. That big framebuffer won't really be used for
	 * binding with screen target as during prepare_fb a separate surface is
	 * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_target &&
	    for_scanout && size.width <= dev_priv->stdu_max_width &&
	    size.height <= dev_priv->stdu_max_height)
		srf->flags |= SVGA3D_SURFACE_SCREENTARGET;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */
	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;

out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);

out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
/**
 * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_ext_arg *arg =
	    (union drm_vmw_gb_surface_create_ext_arg *)data;
	struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;

	return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
}
/**
 * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_ext_arg *arg =
	    (union drm_vmw_gb_surface_reference_ext_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;

	return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
}
/**
 * vmw_gb_surface_define_internal - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @req:            Request argument from user-space.
 * @rep:            Response argument to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
			       struct drm_vmw_gb_surface_create_ext_req *req,
			       struct drm_vmw_gb_surface_create_rep *rep,
			       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint32_t size;
	uint32_t backup_handle = 0;
	SVGA3dSurfaceAllFlags svga3d_flags_64 =
		SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
				req->base.svga3d_flags);

	if (!dev_priv->has_sm4_1) {
		/*
		 * If SM4_1 is not supported then we cannot send 64-bit flags
		 * to the device.
		 */
		if (req->svga3d_flags_upper_32_bits != 0)
			return -EINVAL;

		if (req->base.multisample_count != 0)
			return -EINVAL;

		if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
			return -EINVAL;

		if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
			return -EINVAL;
	}

	if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
	    req->base.multisample_count == 0)
		return -EINVAL;

	if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS)
		return -EINVAL;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

	size = vmw_user_surface_size;

	/* Define a surface based on the parameters. */
	ret = vmw_surface_gb_priv_define(dev,
					 size,
					 svga3d_flags_64,
					 req->base.format,
					 req->base.drm_surface_flags &
					 drm_vmw_surface_flag_scanout,
					 req->base.mip_levels,
					 req->base.multisample_count,
					 req->base.array_size,
					 req->base.base_size,
					 req->multisample_pattern,
					 req->quality_level,
					 &srf);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(srf, struct vmw_user_surface, srf);
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_master_get(file_priv->master);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	res = &user_srf->srf.res;

	if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle,
					 &res->backup,
					 &user_srf->backup_base);
		if (ret == 0) {
			if (res->backup->base.num_pages * PAGE_SIZE <
			    res->backup_size) {
				VMW_DEBUG_USER("Surface backup buffer too small.\n");
				vmw_bo_unreference(&res->backup);
				ret = -EINVAL;
				goto out_unlock;
			} else {
				backup_handle = req->base.buffer_handle;
			}
		}
	} else if (req->base.drm_surface_flags &
		   (drm_vmw_surface_flag_create_buffer |
		    drm_vmw_surface_flag_coherent))
		ret = vmw_user_bo_alloc(dev_priv, tfile,
					res->backup_size,
					req->base.drm_surface_flags &
					drm_vmw_surface_flag_shareable,
					&backup_handle,
					&res->backup,
					&user_srf->backup_base);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
		struct vmw_buffer_object *backup = res->backup;

		ttm_bo_reserve(&backup->base, false, false, NULL);
		if (!res->func->dirty_alloc)
			ret = -EINVAL;
		if (!ret)
			ret = vmw_bo_dirty_add(backup);
		if (!ret) {
			res->coherent = true;
			ret = res->func->dirty_alloc(res);
		}
		ttm_bo_unreserve(&backup->base);
		if (ret) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
	}

	tmp = vmw_resource_reference(res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->base.drm_surface_flags &
				    drm_vmw_surface_flag_shareable,
				    VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->handle      = user_srf->prime.base.handle;
	rep->backup_size = res->backup_size;
	if (res->backup) {
		rep->buffer_map_handle =
			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
		rep->buffer_handle = backup_handle;
	} else {
		rep->buffer_map_handle = 0;
		rep->buffer_size = 0;
		rep->buffer_handle = SVGA3D_INVALID_ID;
	}

	vmw_resource_unreference(&res);

out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
/**
 * vmw_gb_surface_reference_internal - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @req:            Pointer to user-space request surface arg.
 * @rep:            Pointer to response to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
				  struct drm_vmw_surface_arg *req,
				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
				  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	uint32_t backup_handle;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;
	if (!srf->res.backup) {
		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
		ret = -EINVAL;
		goto out_bad_resource;
	}

	mutex_lock(&dev_priv->cmdbuf_mutex);	/* Protect res->backup */
	ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a GB surface "
			  "backup buffer.\n");
		(void) ttm_ref_object_base_unref(tfile, base->handle,
						 TTM_REF_USAGE);
		goto out_bad_resource;
	}

	rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(srf->flags);
	rep->creq.base.format = srf->format;
	rep->creq.base.mip_levels = srf->mip_levels[0];
	rep->creq.base.drm_surface_flags = 0;
	rep->creq.base.multisample_count = srf->multisample_count;
	rep->creq.base.autogen_filter = srf->autogen_filter;
	rep->creq.base.array_size = srf->array_size;
	rep->creq.base.buffer_handle = backup_handle;
	rep->creq.base.base_size = srf->base_size;
	rep->crep.handle = user_srf->prime.base.handle;
	rep->crep.backup_size = srf->res.backup_size;
	rep->crep.buffer_handle = backup_handle;
	rep->crep.buffer_map_handle =
		drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;

	rep->creq.version = drm_vmw_gb_surface_v1;
	rep->creq.svga3d_flags_upper_32_bits =
		SVGA3D_FLAGS_UPPER_32(srf->flags);
	rep->creq.multisample_pattern = srf->multisample_pattern;
	rep->creq.quality_level = srf->quality_level;
	rep->creq.must_be_zero = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
/**
 * vmw_subres_dirty_add - Add a dirty region to a subresource
 * @dirty: The surface's dirty tracker.
 * @loc_start: The location corresponding to the start of the region.
 * @loc_end: The location corresponding to the end of the region.
 *
 * As we are assuming that @loc_start and @loc_end represent a sequential
 * range of backing store memory, if the region spans multiple lines then
 * regardless of the x coordinate, the full lines are dirtied.
 * Correspondingly if the region spans multiple z slices, then full rather
 * than partial z slices are dirtied.
 */
static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty,
				 const struct svga3dsurface_loc *loc_start,
				 const struct svga3dsurface_loc *loc_end)
{
	const struct svga3dsurface_cache *cache = &dirty->cache;
	SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];
	u32 mip = loc_start->sub_resource % cache->num_mip_levels;
	const struct drm_vmw_size *size = &cache->mip[mip].size;
	u32 box_c2 = box->z + box->d;

	if (WARN_ON(loc_start->sub_resource >= dirty->num_subres))
		return;

	if (box->d == 0 || box->z > loc_start->z)
		box->z = loc_start->z;
	if (box_c2 < loc_end->z)
		box->d = loc_end->z - box->z;

	if (loc_start->z + 1 == loc_end->z) {
		box_c2 = box->y + box->h;
		if (box->h == 0 || box->y > loc_start->y)
			box->y = loc_start->y;
		if (box_c2 < loc_end->y)
			box->h = loc_end->y - box->y;

		if (loc_start->y + 1 == loc_end->y) {
			box_c2 = box->x + box->w;
			if (box->w == 0 || box->x > loc_start->x)
				box->x = loc_start->x;
			if (box_c2 < loc_end->x)
				box->w = loc_end->x - box->x;
		} else {
			box->x = 0;
			box->w = size->width;
		}
	} else {
		box->y = 0;
		box->h = size->height;
		box->x = 0;
		box->w = size->width;
	}
}
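
/*
 * A worked example (illustrative numbers) of the box-union logic above,
 * restricted to the x axis: with an existing dirty box {x = 2, w = 4}
 * (box_c2 == 6) and a new range from x = 1 to x = 8, box->x is first
 * pulled down to 1 since box->x > loc_start->x, and box->w then grows to
 * loc_end->x - box->x == 7. The resulting box {x = 1, w = 7} is exactly
 * the union [1, 8) of the old and new extents.
 */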
/**
 * vmw_subres_dirty_full - Mark a full subresource as dirty
 * @dirty: The surface's dirty tracker.
 * @subres: The subresource
 */
static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)
{
	const struct svga3dsurface_cache *cache = &dirty->cache;
	u32 mip = subres % cache->num_mip_levels;
	const struct drm_vmw_size *size = &cache->mip[mip].size;
	SVGA3dBox *box = &dirty->boxes[subres];

	box->x = 0;
	box->y = 0;
	box->z = 0;
	box->w = size->width;
	box->h = size->height;
	box->d = size->depth;
}
/*
 * vmw_surface_tex_dirty_range_add - The dirty_range_add callback for texture
 * surfaces.
 */
static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
					    size_t start, size_t end)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	size_t backup_end = res->backup_offset + res->backup_size;
	struct svga3dsurface_loc loc1, loc2;
	const struct svga3dsurface_cache *cache;

	start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
	end = min(end, backup_end) - res->backup_offset;
	cache = &dirty->cache;
	svga3dsurface_get_loc(cache, &loc1, start);
	svga3dsurface_get_loc(cache, &loc2, end - 1);
	svga3dsurface_inc_loc(cache, &loc2);

	if (loc1.sub_resource + 1 == loc2.sub_resource) {
		/* Dirty range covers a single sub-resource */
		vmw_subres_dirty_add(dirty, &loc1, &loc2);
	} else {
		/* Dirty range covers multiple sub-resources */
		struct svga3dsurface_loc loc_min, loc_max;
		u32 sub_res;

		svga3dsurface_max_loc(cache, loc1.sub_resource, &loc_max);
		vmw_subres_dirty_add(dirty, &loc1, &loc_max);
		svga3dsurface_min_loc(cache, loc2.sub_resource - 1, &loc_min);
		vmw_subres_dirty_add(dirty, &loc_min, &loc2);
		for (sub_res = loc1.sub_resource + 1;
		     sub_res < loc2.sub_resource - 1; ++sub_res)
			vmw_subres_dirty_full(dirty, sub_res);
	}
}
/*
 * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer
 * surfaces.
 */
static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
					    size_t start, size_t end)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	const struct svga3dsurface_cache *cache = &dirty->cache;
	size_t backup_end = res->backup_offset + cache->mip_chain_bytes;
	SVGA3dBox *box = &dirty->boxes[0];
	u32 box_c2;

	box->h = box->d = 1;
	start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
	end = min(end, backup_end) - res->backup_offset;
	box_c2 = box->x + box->w;
	if (box->w == 0 || box->x > start)
		box->x = start;
	if (box_c2 < end)
		box->w = end - box->x;
}
/*
 * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces
 */
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
					size_t end)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);

	if (WARN_ON(end <= res->backup_offset ||
		    start >= res->backup_offset + res->backup_size))
		return;

	if (srf->format == SVGA3D_BUFFER)
		vmw_surface_buf_dirty_range_add(res, start, end);
	else
		vmw_surface_tex_dirty_range_add(res, start, end);
}
/*
 * vmw_surface_dirty_sync - The surface's dirty_sync callback.
 */
static int vmw_surface_dirty_sync(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	bool has_dx = false;
	u32 i, num_dirty;
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	size_t alloc_size;
	const struct svga3dsurface_cache *cache = &dirty->cache;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXUpdateSubResource body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd2;
	void *cmd;

	num_dirty = 0;
	for (i = 0; i < dirty->num_subres; ++i) {
		const SVGA3dBox *box = &dirty->boxes[i];

		if (box->d)
			num_dirty++;
	}

	if (!num_dirty)
		goto out;

	alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
	if (!cmd)
		return -ENOMEM;

	cmd1 = cmd;
	cmd2 = cmd;

	for (i = 0; i < dirty->num_subres; ++i) {
		const SVGA3dBox *box = &dirty->boxes[i];

		if (!box->d)
			continue;

		/*
		 * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
		 * UPDATE_GB_IMAGE is not.
		 */
		if (has_dx) {
			cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
			cmd1->header.size = sizeof(cmd1->body);
			cmd1->body.sid = res->id;
			cmd1->body.subResource = i;
			cmd1->body.box = *box;
			cmd1++;
		} else {
			cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
			cmd2->header.size = sizeof(cmd2->body);
			cmd2->body.image.sid = res->id;
			cmd2->body.image.face = i / cache->num_mip_levels;
			cmd2->body.image.mipmap = i -
				(cache->num_mip_levels * cmd2->body.image.face);
			cmd2->body.box = *box;
			cmd2++;
		}
	}
	vmw_fifo_commit(dev_priv, alloc_size);
out:
	memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
	       dirty->num_subres);

	return 0;
}
/*
 * vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
 */
static int vmw_surface_dirty_alloc(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_surface_dirty *dirty;
	u32 num_layers = 1;
	u32 num_mip;
	u32 num_subres;
	u32 num_samples;
	size_t dirty_size, acc_size;
	static struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	if (srf->array_size)
		num_layers = srf->array_size;
	else if (srf->flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers *= SVGA3D_MAX_SURFACE_FACES;

	num_mip = srf->mip_levels[0];
	if (!num_mip)
		num_mip = 1;

	num_subres = num_layers * num_mip;
	dirty_size = sizeof(*dirty) + num_subres * sizeof(dirty->boxes[0]);
	acc_size = ttm_round_pot(dirty_size);
	ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv),
				   acc_size, &ctx);
	if (ret) {
		VMW_DEBUG_USER("Out of graphics memory for surface "
			       "dirty tracker.\n");
		return ret;
	}

	dirty = kvzalloc(dirty_size, GFP_KERNEL);
	if (!dirty) {
		ret = -ENOMEM;
		goto out_no_dirty;
	}

	num_samples = max_t(u32, 1, srf->multisample_count);
	ret = svga3dsurface_setup_cache(&srf->base_size, srf->format, num_mip,
					num_layers, num_samples, &dirty->cache);
	if (ret)
		goto out_no_cache;

	dirty->num_subres = num_subres;
	dirty->size = acc_size;
	res->dirty = (struct vmw_resource_dirty *) dirty;

	return 0;

out_no_cache:
	kvfree(dirty);
out_no_dirty:
	ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
	return ret;
}
/*
 * vmw_surface_dirty_free - The surface's dirty_free callback
 */
static void vmw_surface_dirty_free(struct vmw_resource *res)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	size_t acc_size = dirty->size;

	kvfree(dirty);
	ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
	res->dirty = NULL;
}
/*
 * vmw_surface_clean - The surface's clean callback
 */
static int vmw_surface_clean(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	size_t alloc_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	alloc_size = sizeof(*cmd);
	cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_fifo_commit(dev_priv, alloc_size);

	return 0;
}