// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "device_include/svga3d_surfacedefs.h"

#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
	(svga3d_flags & ((uint64_t)U32_MAX))
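
/*
 * Illustrative sketch (not part of the driver): how the helpers above
 * round-trip the 64-bit surface flags that the extended GB-surface ioctls
 * transport as two 32-bit words. The concrete value below is made up.
 *
 *	SVGA3dSurfaceAllFlags flags = SVGA3D_FLAGS_64(0x1u, 0x80000000u);
 *	flags == 0x0000000180000000ULL
 *	SVGA3D_FLAGS_UPPER_32(flags) == 0x1
 *	SVGA3D_FLAGS_LOWER_32(flags) == 0x80000000
 */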

/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime:       The TTM prime object handling user-space visibility.
 * @srf:         The surface metadata.
 * @size:        TTM accounting size for the surface.
 * @master:      Master of the creating client. Used for security check.
 * @backup_base: The TTM base object of the backup buffer.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	uint32_t size;
	struct drm_master *master;
	struct ttm_base_object *backup_base;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:      Surface face.
 * @mip:       Mip level.
 * @bo_offset: Offset into backing store of this mip level.
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

/**
 * struct vmw_surface_dirty - Surface dirty-tracker
 * @cache: Cached layout information of the surface.
 * @size: Accounting size for the struct vmw_surface_dirty.
 * @num_subres: Number of subresources.
 * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
 */
struct vmw_surface_dirty {
	struct svga3dsurface_cache cache;
	size_t size;
	u32 num_subres;
	SVGA3dBox boxes[];
};
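
/*
 * Illustrative note (not part of the driver): the dirty tracker carries one
 * SVGA3dBox per subresource in the flexible array above, so the allocation
 * size for num_subres subresources is
 *
 *	struct_size(dirty, boxes, num_subres)
 *	    == sizeof(struct vmw_surface_dirty) +
 *	       num_subres * sizeof(SVGA3dBox)
 *
 * which is how vmw_surface_dirty_alloc() sizes it further down.
 */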

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
			       struct drm_vmw_gb_surface_create_ext_req *req,
			       struct drm_vmw_gb_surface_create_rep *rep,
			       struct drm_file *file_priv);
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
				  struct drm_vmw_surface_arg *req,
				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
				  struct drm_file *file_priv);
static void vmw_surface_dirty_free(struct vmw_resource *res);
static int vmw_surface_dirty_alloc(struct vmw_resource *res);
static int vmw_surface_dirty_sync(struct vmw_resource *res);
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
					size_t end);
static int vmw_surface_clean(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;

static uint64_t vmw_user_surface_size;

static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind,
	.dirty_alloc = vmw_surface_dirty_alloc,
	.dirty_free = vmw_surface_dirty_free,
	.dirty_sync = vmw_surface_dirty_sync,
	.dirty_range_add = vmw_surface_dirty_range_add,
	.clean = vmw_surface_clean,
};

/**
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};

/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma);
}


/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->metadata.num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}
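
/*
 * Sizing sketch (illustrative only, numbers made up): a legacy surface with
 * three mip sizes needs three full struct vmw_surface_dma packets of FIFO
 * space for a backup or restore, i.e.
 *
 *	vmw_surface_dma_size(srf) == 3 * sizeof(struct vmw_surface_dma)
 *
 * while defining it takes one struct vmw_surface_define followed by three
 * SVGA3dSize entries.
 */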

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->metadata.num_sizes *
		sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	/*
	 * Downcast of surfaceFlags, was upcast when received from user-space,
	 * since the driver internally stores it as 64 bit.
	 * For legacy surface define only the 32 bit flag is supported.
	 */
	cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags;
	cmd->body.format = srf->metadata.format;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->metadata.sizes;

	for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}
293 * vmw_surface_dma_encode - Encode a surface_dma command.
295 * @srf: Pointer to a struct vmw_surface object.
296 * @cmd_space: Pointer to memory area in which the commands should be encoded.
297 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
298 * should be placed or read from.
299 * @to_surface: Boolean whether to DMA to the surface or from the surface.
301 static void vmw_surface_dma_encode(struct vmw_surface
*srf
,
303 const SVGAGuestPtr
*ptr
,
307 struct vmw_surface_dma
*cmd
= (struct vmw_surface_dma
*)cmd_space
;
308 const struct svga3d_surface_desc
*desc
=
309 svga3dsurface_get_desc(srf
->metadata
.format
);
311 for (i
= 0; i
< srf
->metadata
.num_sizes
; ++i
) {
312 SVGA3dCmdHeader
*header
= &cmd
->header
;
313 SVGA3dCmdSurfaceDMA
*body
= &cmd
->body
;
314 SVGA3dCopyBox
*cb
= &cmd
->cb
;
315 SVGA3dCmdSurfaceDMASuffix
*suffix
= &cmd
->suffix
;
316 const struct vmw_surface_offset
*cur_offset
= &srf
->offsets
[i
];
317 const struct drm_vmw_size
*cur_size
= &srf
->metadata
.sizes
[i
];
319 header
->id
= SVGA_3D_CMD_SURFACE_DMA
;
320 header
->size
= sizeof(*body
) + sizeof(*cb
) + sizeof(*suffix
);
322 body
->guest
.ptr
= *ptr
;
323 body
->guest
.ptr
.offset
+= cur_offset
->bo_offset
;
324 body
->guest
.pitch
= svga3dsurface_calculate_pitch(desc
,
326 body
->host
.sid
= srf
->res
.id
;
327 body
->host
.face
= cur_offset
->face
;
328 body
->host
.mipmap
= cur_offset
->mip
;
329 body
->transfer
= ((to_surface
) ? SVGA3D_WRITE_HOST_VRAM
:
330 SVGA3D_READ_HOST_VRAM
);
337 cb
->w
= cur_size
->width
;
338 cb
->h
= cur_size
->height
;
339 cb
->d
= cur_size
->depth
;
341 suffix
->suffixSize
= sizeof(*suffix
);
342 suffix
->maximumOffset
=
343 svga3dsurface_get_image_buffer_size(desc
, cur_size
,
345 suffix
->flags
.discard
= 0;
346 suffix
->flags
.unsynchronized
= 0;
347 suffix
->flags
.reserved
= 0;
/**
 * vmw_hw_surface_destroy - destroy a device surface
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 *       vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts accounting and resource count accordingly.
 */
362 static void vmw_hw_surface_destroy(struct vmw_resource
*res
)
365 struct vmw_private
*dev_priv
= res
->dev_priv
;
368 if (res
->func
->destroy
== vmw_gb_surface_destroy
) {
369 (void) vmw_gb_surface_destroy(res
);
375 cmd
= VMW_FIFO_RESERVE(dev_priv
, vmw_surface_destroy_size());
379 vmw_surface_destroy_encode(res
->id
, cmd
);
380 vmw_fifo_commit(dev_priv
, vmw_surface_destroy_size());
383 * used_memory_size_atomic, or separate lock
384 * to avoid taking dev_priv::cmdbuf_mutex in
388 mutex_lock(&dev_priv
->cmdbuf_mutex
);
389 dev_priv
->used_memory_size
-= res
->backup_size
;
390 mutex_unlock(&dev_priv
->cmdbuf_mutex
);
395 * vmw_legacy_srf_create - Create a device surface as part of the
396 * resource validation process.
398 * @res: Pointer to a struct vmw_surface.
400 * If the surface doesn't have a hw id.
402 * Returns -EBUSY if there wasn't sufficient device resources to
403 * complete the validation. Retry after freeing up resources.
405 * May return other errors if the kernel is out of guest resources.
407 static int vmw_legacy_srf_create(struct vmw_resource
*res
)
409 struct vmw_private
*dev_priv
= res
->dev_priv
;
410 struct vmw_surface
*srf
;
411 uint32_t submit_size
;
415 if (likely(res
->id
!= -1))
418 srf
= vmw_res_to_srf(res
);
419 if (unlikely(dev_priv
->used_memory_size
+ res
->backup_size
>=
420 dev_priv
->memory_size
))
424 * Alloc id for the resource.
427 ret
= vmw_resource_alloc_id(res
);
428 if (unlikely(ret
!= 0)) {
429 DRM_ERROR("Failed to allocate a surface id.\n");
433 if (unlikely(res
->id
>= SVGA3D_MAX_SURFACE_IDS
)) {
439 * Encode surface define- commands.
442 submit_size
= vmw_surface_define_size(srf
);
443 cmd
= VMW_FIFO_RESERVE(dev_priv
, submit_size
);
444 if (unlikely(!cmd
)) {
449 vmw_surface_define_encode(srf
, cmd
);
450 vmw_fifo_commit(dev_priv
, submit_size
);
451 vmw_fifo_resource_inc(dev_priv
);
454 * Surface memory usage accounting.
457 dev_priv
->used_memory_size
+= res
->backup_size
;
461 vmw_resource_release_id(res
);
467 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
469 * @res: Pointer to a struct vmw_res embedded in a struct
471 * @val_buf: Pointer to a struct ttm_validate_buffer containing
472 * information about the backup buffer.
 * @bind: Boolean whether to DMA to the surface.
475 * Transfer backup data to or from a legacy surface as part of the
476 * validation process.
477 * May return other errors if the kernel is out of guest resources.
478 * The backup buffer will be fenced or idle upon successful completion,
479 * and if the surface needs persistent backup storage, the backup buffer
480 * will also be returned reserved iff @bind is true.
482 static int vmw_legacy_srf_dma(struct vmw_resource
*res
,
483 struct ttm_validate_buffer
*val_buf
,
487 struct vmw_fence_obj
*fence
;
488 uint32_t submit_size
;
489 struct vmw_surface
*srf
= vmw_res_to_srf(res
);
491 struct vmw_private
*dev_priv
= res
->dev_priv
;
493 BUG_ON(!val_buf
->bo
);
494 submit_size
= vmw_surface_dma_size(srf
);
495 cmd
= VMW_FIFO_RESERVE(dev_priv
, submit_size
);
499 vmw_bo_get_guest_ptr(val_buf
->bo
, &ptr
);
500 vmw_surface_dma_encode(srf
, cmd
, &ptr
, bind
);
502 vmw_fifo_commit(dev_priv
, submit_size
);
505 * Create a fence object and fence the backup buffer.
508 (void) vmw_execbuf_fence_commands(NULL
, dev_priv
,
511 vmw_bo_fence_single(val_buf
->bo
, fence
);
513 if (likely(fence
!= NULL
))
514 vmw_fence_obj_unreference(&fence
);
520 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
521 * surface validation process.
523 * @res: Pointer to a struct vmw_res embedded in a struct
525 * @val_buf: Pointer to a struct ttm_validate_buffer containing
526 * information about the backup buffer.
528 * This function will copy backup data to the surface if the
529 * backup buffer is dirty.
531 static int vmw_legacy_srf_bind(struct vmw_resource
*res
,
532 struct ttm_validate_buffer
*val_buf
)
534 if (!res
->backup_dirty
)
537 return vmw_legacy_srf_dma(res
, val_buf
, true);
542 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
543 * surface eviction process.
545 * @res: Pointer to a struct vmw_res embedded in a struct
547 * @val_buf: Pointer to a struct ttm_validate_buffer containing
548 * information about the backup buffer.
550 * This function will copy backup data from the surface.
552 static int vmw_legacy_srf_unbind(struct vmw_resource
*res
,
554 struct ttm_validate_buffer
*val_buf
)
556 if (unlikely(readback
))
557 return vmw_legacy_srf_dma(res
, val_buf
, false);
562 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
563 * resource eviction process.
565 * @res: Pointer to a struct vmw_res embedded in a struct
568 static int vmw_legacy_srf_destroy(struct vmw_resource
*res
)
570 struct vmw_private
*dev_priv
= res
->dev_priv
;
571 uint32_t submit_size
;
574 BUG_ON(res
->id
== -1);
577 * Encode the dma- and surface destroy commands.
580 submit_size
= vmw_surface_destroy_size();
581 cmd
= VMW_FIFO_RESERVE(dev_priv
, submit_size
);
585 vmw_surface_destroy_encode(res
->id
, cmd
);
586 vmw_fifo_commit(dev_priv
, submit_size
);
589 * Surface memory usage accounting.
592 dev_priv
->used_memory_size
-= res
->backup_size
;
595 * Release the surface ID.
598 vmw_resource_release_id(res
);
599 vmw_fifo_resource_dec(dev_priv
);
606 * vmw_surface_init - initialize a struct vmw_surface
608 * @dev_priv: Pointer to a device private struct.
609 * @srf: Pointer to the struct vmw_surface to initialize.
610 * @res_free: Pointer to a resource destructor used to free
613 static int vmw_surface_init(struct vmw_private
*dev_priv
,
614 struct vmw_surface
*srf
,
615 void (*res_free
) (struct vmw_resource
*res
))
618 struct vmw_resource
*res
= &srf
->res
;
621 ret
= vmw_resource_init(dev_priv
, res
, true, res_free
,
622 (dev_priv
->has_mob
) ? &vmw_gb_surface_func
:
623 &vmw_legacy_surface_func
);
625 if (unlikely(ret
!= 0)) {
631 * The surface won't be visible to hardware until a
635 INIT_LIST_HEAD(&srf
->view_list
);
636 res
->hw_destroy
= vmw_hw_surface_destroy
;
641 * vmw_user_surface_base_to_res - TTM base object to resource converter for
642 * user visible surfaces
644 * @base: Pointer to a TTM base object
646 * Returns the struct vmw_resource embedded in a struct vmw_surface
647 * for the user-visible object identified by the TTM base object @base.
649 static struct vmw_resource
*
650 vmw_user_surface_base_to_res(struct ttm_base_object
*base
)
652 return &(container_of(base
, struct vmw_user_surface
,
653 prime
.base
)->srf
.res
);
657 * vmw_user_surface_free - User visible surface resource destructor
659 * @res: A struct vmw_resource embedded in a struct vmw_surface.
661 static void vmw_user_surface_free(struct vmw_resource
*res
)
663 struct vmw_surface
*srf
= vmw_res_to_srf(res
);
664 struct vmw_user_surface
*user_srf
=
665 container_of(srf
, struct vmw_user_surface
, srf
);
666 struct vmw_private
*dev_priv
= srf
->res
.dev_priv
;
667 uint32_t size
= user_srf
->size
;
669 WARN_ON_ONCE(res
->dirty
);
670 if (user_srf
->master
)
671 drm_master_put(&user_srf
->master
);
673 kfree(srf
->metadata
.sizes
);
674 kfree(srf
->snooper
.image
);
675 ttm_prime_object_kfree(user_srf
, prime
);
676 ttm_mem_global_free(vmw_mem_glob(dev_priv
), size
);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object destructor
682 * @p_base: Pointer to a pointer to a TTM base object
683 * embedded in a struct vmw_user_surface.
685 * Drops the base object's reference on its resource, and the
686 * pointer pointed to by *p_base is set to NULL.
688 static void vmw_user_surface_base_release(struct ttm_base_object
**p_base
)
690 struct ttm_base_object
*base
= *p_base
;
691 struct vmw_user_surface
*user_srf
=
692 container_of(base
, struct vmw_user_surface
, prime
.base
);
693 struct vmw_resource
*res
= &user_srf
->srf
.res
;
696 if (user_srf
->backup_base
)
697 ttm_base_object_unref(&user_srf
->backup_base
);
698 vmw_resource_unreference(&res
);
}

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 * the user surface destroy functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
720 * the user surface define functionality.
722 * @dev: Pointer to a struct drm_device.
723 * @data: Pointer to data copied from / to user-space.
724 * @file_priv: Pointer to a drm file private structure.
726 int vmw_surface_define_ioctl(struct drm_device
*dev
, void *data
,
727 struct drm_file
*file_priv
)
729 struct vmw_private
*dev_priv
= vmw_priv(dev
);
730 struct vmw_user_surface
*user_srf
;
731 struct vmw_surface
*srf
;
732 struct vmw_surface_metadata
*metadata
;
733 struct vmw_resource
*res
;
734 struct vmw_resource
*tmp
;
735 union drm_vmw_surface_create_arg
*arg
=
736 (union drm_vmw_surface_create_arg
*)data
;
737 struct drm_vmw_surface_create_req
*req
= &arg
->req
;
738 struct drm_vmw_surface_arg
*rep
= &arg
->rep
;
739 struct ttm_object_file
*tfile
= vmw_fpriv(file_priv
)->tfile
;
740 struct ttm_operation_ctx ctx
= {
741 .interruptible
= true,
746 uint32_t cur_bo_offset
;
747 struct drm_vmw_size
*cur_size
;
748 struct vmw_surface_offset
*cur_offset
;
751 const struct svga3d_surface_desc
*desc
;
753 if (unlikely(vmw_user_surface_size
== 0))
754 vmw_user_surface_size
= ttm_round_pot(sizeof(*user_srf
)) +
755 VMW_IDA_ACC_SIZE
+ TTM_OBJ_EXTRA_SIZE
;
758 for (i
= 0; i
< DRM_VMW_MAX_SURFACE_FACES
; ++i
) {
759 if (req
->mip_levels
[i
] > DRM_VMW_MAX_MIP_LEVELS
)
761 num_sizes
+= req
->mip_levels
[i
];
764 if (num_sizes
> DRM_VMW_MAX_SURFACE_FACES
* DRM_VMW_MAX_MIP_LEVELS
||
768 size
= vmw_user_surface_size
+
769 ttm_round_pot(num_sizes
* sizeof(struct drm_vmw_size
)) +
770 ttm_round_pot(num_sizes
* sizeof(struct vmw_surface_offset
));
772 desc
= svga3dsurface_get_desc(req
->format
);
773 if (unlikely(desc
->block_desc
== SVGA3DBLOCKDESC_NONE
)) {
774 VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
779 ret
= ttm_read_lock(&dev_priv
->reservation_sem
, true);
780 if (unlikely(ret
!= 0))
783 ret
= ttm_mem_global_alloc(vmw_mem_glob(dev_priv
),
785 if (unlikely(ret
!= 0)) {
786 if (ret
!= -ERESTARTSYS
)
787 DRM_ERROR("Out of graphics memory for surface.\n");
791 user_srf
= kzalloc(sizeof(*user_srf
), GFP_KERNEL
);
792 if (unlikely(!user_srf
)) {
794 goto out_no_user_srf
;
797 srf
= &user_srf
->srf
;
798 metadata
= &srf
->metadata
;
801 /* Driver internally stores as 64-bit flags */
802 metadata
->flags
= (SVGA3dSurfaceAllFlags
)req
->flags
;
803 metadata
->format
= req
->format
;
804 metadata
->scanout
= req
->scanout
;
806 memcpy(metadata
->mip_levels
, req
->mip_levels
,
807 sizeof(metadata
->mip_levels
));
808 metadata
->num_sizes
= num_sizes
;
809 user_srf
->size
= size
;
811 memdup_user((struct drm_vmw_size __user
*)(unsigned long)
813 sizeof(*metadata
->sizes
) * metadata
->num_sizes
);
814 if (IS_ERR(metadata
->sizes
)) {
815 ret
= PTR_ERR(metadata
->sizes
);
818 srf
->offsets
= kmalloc_array(metadata
->num_sizes
, sizeof(*srf
->offsets
),
820 if (unlikely(!srf
->offsets
)) {
825 metadata
->base_size
= *srf
->metadata
.sizes
;
826 metadata
->autogen_filter
= SVGA3D_TEX_FILTER_NONE
;
827 metadata
->multisample_count
= 0;
828 metadata
->multisample_pattern
= SVGA3D_MS_PATTERN_NONE
;
829 metadata
->quality_level
= SVGA3D_MS_QUALITY_NONE
;
832 cur_offset
= srf
->offsets
;
833 cur_size
= metadata
->sizes
;
835 for (i
= 0; i
< DRM_VMW_MAX_SURFACE_FACES
; ++i
) {
836 for (j
= 0; j
< metadata
->mip_levels
[i
]; ++j
) {
837 uint32_t stride
= svga3dsurface_calculate_pitch
840 cur_offset
->face
= i
;
842 cur_offset
->bo_offset
= cur_bo_offset
;
843 cur_bo_offset
+= svga3dsurface_get_image_buffer_size
844 (desc
, cur_size
, stride
);
849 res
->backup_size
= cur_bo_offset
;
850 if (metadata
->scanout
&&
851 metadata
->num_sizes
== 1 &&
852 metadata
->sizes
[0].width
== 64 &&
853 metadata
->sizes
[0].height
== 64 &&
854 metadata
->format
== SVGA3D_A8R8G8B8
) {
856 srf
->snooper
.image
= kzalloc(64 * 64 * 4, GFP_KERNEL
);
857 if (!srf
->snooper
.image
) {
858 DRM_ERROR("Failed to allocate cursor_image\n");
863 srf
->snooper
.image
= NULL
;
866 user_srf
->prime
.base
.shareable
= false;
867 user_srf
->prime
.base
.tfile
= NULL
;
868 if (drm_is_primary_client(file_priv
))
869 user_srf
->master
= drm_master_get(file_priv
->master
);
872 * From this point, the generic resource management functions
873 * destroy the object on failure.
876 ret
= vmw_surface_init(dev_priv
, srf
, vmw_user_surface_free
);
877 if (unlikely(ret
!= 0))
881 * A gb-aware client referencing a shared surface will
882 * expect a backup buffer to be present.
884 if (dev_priv
->has_mob
&& req
->shareable
) {
885 uint32_t backup_handle
;
887 ret
= vmw_user_bo_alloc(dev_priv
, tfile
,
892 &user_srf
->backup_base
);
893 if (unlikely(ret
!= 0)) {
894 vmw_resource_unreference(&res
);
899 tmp
= vmw_resource_reference(&srf
->res
);
900 ret
= ttm_prime_object_init(tfile
, res
->backup_size
, &user_srf
->prime
,
901 req
->shareable
, VMW_RES_SURFACE
,
902 &vmw_user_surface_base_release
, NULL
);
904 if (unlikely(ret
!= 0)) {
905 vmw_resource_unreference(&tmp
);
906 vmw_resource_unreference(&res
);
910 rep
->sid
= user_srf
->prime
.base
.handle
;
911 vmw_resource_unreference(&res
);
913 ttm_read_unlock(&dev_priv
->reservation_sem
);
918 kfree(metadata
->sizes
);
920 ttm_prime_object_kfree(user_srf
, prime
);
922 ttm_mem_global_free(vmw_mem_glob(dev_priv
), size
);
924 ttm_read_unlock(&dev_priv
->reservation_sem
);
930 vmw_surface_handle_reference(struct vmw_private
*dev_priv
,
931 struct drm_file
*file_priv
,
933 enum drm_vmw_handle_type handle_type
,
934 struct ttm_base_object
**base_p
)
936 struct ttm_object_file
*tfile
= vmw_fpriv(file_priv
)->tfile
;
937 struct vmw_user_surface
*user_srf
;
939 struct ttm_base_object
*base
;
942 if (handle_type
== DRM_VMW_HANDLE_PRIME
) {
943 ret
= ttm_prime_fd_to_handle(tfile
, u_handle
, &handle
);
944 if (unlikely(ret
!= 0))
951 base
= ttm_base_object_lookup_for_ref(dev_priv
->tdev
, handle
);
952 if (unlikely(!base
)) {
953 VMW_DEBUG_USER("Could not find surface to reference.\n");
957 if (unlikely(ttm_base_object_type(base
) != VMW_RES_SURFACE
)) {
958 VMW_DEBUG_USER("Referenced object is not a surface.\n");
959 goto out_bad_resource
;
962 if (handle_type
!= DRM_VMW_HANDLE_PRIME
) {
963 bool require_exist
= false;
965 user_srf
= container_of(base
, struct vmw_user_surface
,
968 /* Error out if we are unauthenticated primary */
969 if (drm_is_primary_client(file_priv
) &&
970 !file_priv
->authenticated
) {
972 goto out_bad_resource
;
976 * Make sure the surface creator has the same
977 * authenticating master, or is already registered with us.
979 if (drm_is_primary_client(file_priv
) &&
980 user_srf
->master
!= file_priv
->master
)
981 require_exist
= true;
983 if (unlikely(drm_is_render_client(file_priv
)))
984 require_exist
= true;
986 ret
= ttm_ref_object_add(tfile
, base
, TTM_REF_USAGE
, NULL
,
988 if (unlikely(ret
!= 0)) {
989 DRM_ERROR("Could not add a reference to a surface.\n");
990 goto out_bad_resource
;
998 ttm_base_object_unref(&base
);
1000 if (handle_type
== DRM_VMW_HANDLE_PRIME
)
1001 (void) ttm_ref_object_base_unref(tfile
, handle
, TTM_REF_USAGE
);
/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
1010 * @dev: Pointer to a struct drm_device.
1011 * @data: Pointer to data copied from / to user-space.
1012 * @file_priv: Pointer to a drm file private structure.
1014 int vmw_surface_reference_ioctl(struct drm_device
*dev
, void *data
,
1015 struct drm_file
*file_priv
)
1017 struct vmw_private
*dev_priv
= vmw_priv(dev
);
1018 union drm_vmw_surface_reference_arg
*arg
=
1019 (union drm_vmw_surface_reference_arg
*)data
;
1020 struct drm_vmw_surface_arg
*req
= &arg
->req
;
1021 struct drm_vmw_surface_create_req
*rep
= &arg
->rep
;
1022 struct ttm_object_file
*tfile
= vmw_fpriv(file_priv
)->tfile
;
1023 struct vmw_surface
*srf
;
1024 struct vmw_user_surface
*user_srf
;
1025 struct drm_vmw_size __user
*user_sizes
;
1026 struct ttm_base_object
*base
;
1029 ret
= vmw_surface_handle_reference(dev_priv
, file_priv
, req
->sid
,
1030 req
->handle_type
, &base
);
1031 if (unlikely(ret
!= 0))
1034 user_srf
= container_of(base
, struct vmw_user_surface
, prime
.base
);
1035 srf
= &user_srf
->srf
;
1037 /* Downcast of flags when sending back to user space */
1038 rep
->flags
= (uint32_t)srf
->metadata
.flags
;
1039 rep
->format
= srf
->metadata
.format
;
1040 memcpy(rep
->mip_levels
, srf
->metadata
.mip_levels
,
1041 sizeof(srf
->metadata
.mip_levels
));
1042 user_sizes
= (struct drm_vmw_size __user
*)(unsigned long)
1046 ret
= copy_to_user(user_sizes
, &srf
->metadata
.base_size
,
1047 sizeof(srf
->metadata
.base_size
));
1048 if (unlikely(ret
!= 0)) {
1049 VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes
,
1050 srf
->metadata
.num_sizes
);
1051 ttm_ref_object_base_unref(tfile
, base
->handle
, TTM_REF_USAGE
);
1055 ttm_base_object_unref(&base
);
/**
 * vmw_gb_surface_create - Create a guest backed device surface.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 */
1066 static int vmw_gb_surface_create(struct vmw_resource
*res
)
1068 struct vmw_private
*dev_priv
= res
->dev_priv
;
1069 struct vmw_surface
*srf
= vmw_res_to_srf(res
);
1070 struct vmw_surface_metadata
*metadata
= &srf
->metadata
;
1071 uint32_t cmd_len
, cmd_id
, submit_len
;
1074 SVGA3dCmdHeader header
;
1075 SVGA3dCmdDefineGBSurface body
;
1078 SVGA3dCmdHeader header
;
1079 SVGA3dCmdDefineGBSurface_v2 body
;
1082 SVGA3dCmdHeader header
;
1083 SVGA3dCmdDefineGBSurface_v3 body
;
1086 SVGA3dCmdHeader header
;
1087 SVGA3dCmdDefineGBSurface_v4 body
;
1090 if (likely(res
->id
!= -1))
1093 vmw_fifo_resource_inc(dev_priv
);
1094 ret
= vmw_resource_alloc_id(res
);
1095 if (unlikely(ret
!= 0)) {
1096 DRM_ERROR("Failed to allocate a surface id.\n");
1100 if (unlikely(res
->id
>= VMWGFX_NUM_GB_SURFACE
)) {
1105 if (has_sm5_context(dev_priv
) && metadata
->array_size
> 0) {
1106 cmd_id
= SVGA_3D_CMD_DEFINE_GB_SURFACE_V4
;
1107 cmd_len
= sizeof(cmd4
->body
);
1108 submit_len
= sizeof(*cmd4
);
1109 } else if (has_sm4_1_context(dev_priv
) && metadata
->array_size
> 0) {
1110 cmd_id
= SVGA_3D_CMD_DEFINE_GB_SURFACE_V3
;
1111 cmd_len
= sizeof(cmd3
->body
);
1112 submit_len
= sizeof(*cmd3
);
1113 } else if (metadata
->array_size
> 0) {
1114 /* VMW_SM_4 support verified at creation time. */
1115 cmd_id
= SVGA_3D_CMD_DEFINE_GB_SURFACE_V2
;
1116 cmd_len
= sizeof(cmd2
->body
);
1117 submit_len
= sizeof(*cmd2
);
1119 cmd_id
= SVGA_3D_CMD_DEFINE_GB_SURFACE
;
1120 cmd_len
= sizeof(cmd
->body
);
1121 submit_len
= sizeof(*cmd
);
1124 cmd
= VMW_FIFO_RESERVE(dev_priv
, submit_len
);
1125 cmd2
= (typeof(cmd2
))cmd
;
1126 cmd3
= (typeof(cmd3
))cmd
;
1127 cmd4
= (typeof(cmd4
))cmd
;
1128 if (unlikely(!cmd
)) {
1133 if (has_sm5_context(dev_priv
) && metadata
->array_size
> 0) {
1134 cmd4
->header
.id
= cmd_id
;
1135 cmd4
->header
.size
= cmd_len
;
1136 cmd4
->body
.sid
= srf
->res
.id
;
1137 cmd4
->body
.surfaceFlags
= metadata
->flags
;
1138 cmd4
->body
.format
= metadata
->format
;
1139 cmd4
->body
.numMipLevels
= metadata
->mip_levels
[0];
1140 cmd4
->body
.multisampleCount
= metadata
->multisample_count
;
1141 cmd4
->body
.multisamplePattern
= metadata
->multisample_pattern
;
1142 cmd4
->body
.qualityLevel
= metadata
->quality_level
;
1143 cmd4
->body
.autogenFilter
= metadata
->autogen_filter
;
1144 cmd4
->body
.size
.width
= metadata
->base_size
.width
;
1145 cmd4
->body
.size
.height
= metadata
->base_size
.height
;
1146 cmd4
->body
.size
.depth
= metadata
->base_size
.depth
;
1147 cmd4
->body
.arraySize
= metadata
->array_size
;
1148 cmd4
->body
.bufferByteStride
= metadata
->buffer_byte_stride
;
1149 } else if (has_sm4_1_context(dev_priv
) && metadata
->array_size
> 0) {
1150 cmd3
->header
.id
= cmd_id
;
1151 cmd3
->header
.size
= cmd_len
;
1152 cmd3
->body
.sid
= srf
->res
.id
;
1153 cmd3
->body
.surfaceFlags
= metadata
->flags
;
1154 cmd3
->body
.format
= metadata
->format
;
1155 cmd3
->body
.numMipLevels
= metadata
->mip_levels
[0];
1156 cmd3
->body
.multisampleCount
= metadata
->multisample_count
;
1157 cmd3
->body
.multisamplePattern
= metadata
->multisample_pattern
;
1158 cmd3
->body
.qualityLevel
= metadata
->quality_level
;
1159 cmd3
->body
.autogenFilter
= metadata
->autogen_filter
;
1160 cmd3
->body
.size
.width
= metadata
->base_size
.width
;
1161 cmd3
->body
.size
.height
= metadata
->base_size
.height
;
1162 cmd3
->body
.size
.depth
= metadata
->base_size
.depth
;
1163 cmd3
->body
.arraySize
= metadata
->array_size
;
1164 } else if (metadata
->array_size
> 0) {
1165 cmd2
->header
.id
= cmd_id
;
1166 cmd2
->header
.size
= cmd_len
;
1167 cmd2
->body
.sid
= srf
->res
.id
;
1168 cmd2
->body
.surfaceFlags
= metadata
->flags
;
1169 cmd2
->body
.format
= metadata
->format
;
1170 cmd2
->body
.numMipLevels
= metadata
->mip_levels
[0];
1171 cmd2
->body
.multisampleCount
= metadata
->multisample_count
;
1172 cmd2
->body
.autogenFilter
= metadata
->autogen_filter
;
1173 cmd2
->body
.size
.width
= metadata
->base_size
.width
;
1174 cmd2
->body
.size
.height
= metadata
->base_size
.height
;
1175 cmd2
->body
.size
.depth
= metadata
->base_size
.depth
;
1176 cmd2
->body
.arraySize
= metadata
->array_size
;
1178 cmd
->header
.id
= cmd_id
;
1179 cmd
->header
.size
= cmd_len
;
1180 cmd
->body
.sid
= srf
->res
.id
;
1181 cmd
->body
.surfaceFlags
= metadata
->flags
;
1182 cmd
->body
.format
= metadata
->format
;
1183 cmd
->body
.numMipLevels
= metadata
->mip_levels
[0];
1184 cmd
->body
.multisampleCount
= metadata
->multisample_count
;
1185 cmd
->body
.autogenFilter
= metadata
->autogen_filter
;
1186 cmd
->body
.size
.width
= metadata
->base_size
.width
;
1187 cmd
->body
.size
.height
= metadata
->base_size
.height
;
1188 cmd
->body
.size
.depth
= metadata
->base_size
.depth
;
1191 vmw_fifo_commit(dev_priv
, submit_len
);
1196 vmw_resource_release_id(res
);
1198 vmw_fifo_resource_dec(dev_priv
);
1203 static int vmw_gb_surface_bind(struct vmw_resource
*res
,
1204 struct ttm_validate_buffer
*val_buf
)
1206 struct vmw_private
*dev_priv
= res
->dev_priv
;
1208 SVGA3dCmdHeader header
;
1209 SVGA3dCmdBindGBSurface body
;
1212 SVGA3dCmdHeader header
;
1213 SVGA3dCmdUpdateGBSurface body
;
1215 uint32_t submit_size
;
1216 struct ttm_buffer_object
*bo
= val_buf
->bo
;
1218 BUG_ON(bo
->mem
.mem_type
!= VMW_PL_MOB
);
1220 submit_size
= sizeof(*cmd1
) + (res
->backup_dirty
? sizeof(*cmd2
) : 0);
1222 cmd1
= VMW_FIFO_RESERVE(dev_priv
, submit_size
);
1223 if (unlikely(!cmd1
))
1226 cmd1
->header
.id
= SVGA_3D_CMD_BIND_GB_SURFACE
;
1227 cmd1
->header
.size
= sizeof(cmd1
->body
);
1228 cmd1
->body
.sid
= res
->id
;
1229 cmd1
->body
.mobid
= bo
->mem
.start
;
1230 if (res
->backup_dirty
) {
1231 cmd2
= (void *) &cmd1
[1];
1232 cmd2
->header
.id
= SVGA_3D_CMD_UPDATE_GB_SURFACE
;
1233 cmd2
->header
.size
= sizeof(cmd2
->body
);
1234 cmd2
->body
.sid
= res
->id
;
1236 vmw_fifo_commit(dev_priv
, submit_size
);
1238 if (res
->backup
->dirty
&& res
->backup_dirty
) {
		/* We've just made a full upload. Clear dirty regions. */
1240 vmw_bo_dirty_clear_res(res
);
1243 res
->backup_dirty
= false;
1248 static int vmw_gb_surface_unbind(struct vmw_resource
*res
,
1250 struct ttm_validate_buffer
*val_buf
)
1252 struct vmw_private
*dev_priv
= res
->dev_priv
;
1253 struct ttm_buffer_object
*bo
= val_buf
->bo
;
1254 struct vmw_fence_obj
*fence
;
1257 SVGA3dCmdHeader header
;
1258 SVGA3dCmdReadbackGBSurface body
;
1261 SVGA3dCmdHeader header
;
1262 SVGA3dCmdInvalidateGBSurface body
;
1265 SVGA3dCmdHeader header
;
1266 SVGA3dCmdBindGBSurface body
;
1268 uint32_t submit_size
;
1272 BUG_ON(bo
->mem
.mem_type
!= VMW_PL_MOB
);
1274 submit_size
= sizeof(*cmd3
) + (readback
? sizeof(*cmd1
) : sizeof(*cmd2
));
1275 cmd
= VMW_FIFO_RESERVE(dev_priv
, submit_size
);
1280 cmd1
= (void *) cmd
;
1281 cmd1
->header
.id
= SVGA_3D_CMD_READBACK_GB_SURFACE
;
1282 cmd1
->header
.size
= sizeof(cmd1
->body
);
1283 cmd1
->body
.sid
= res
->id
;
1284 cmd3
= (void *) &cmd1
[1];
1286 cmd2
= (void *) cmd
;
1287 cmd2
->header
.id
= SVGA_3D_CMD_INVALIDATE_GB_SURFACE
;
1288 cmd2
->header
.size
= sizeof(cmd2
->body
);
1289 cmd2
->body
.sid
= res
->id
;
1290 cmd3
= (void *) &cmd2
[1];
1293 cmd3
->header
.id
= SVGA_3D_CMD_BIND_GB_SURFACE
;
1294 cmd3
->header
.size
= sizeof(cmd3
->body
);
1295 cmd3
->body
.sid
= res
->id
;
1296 cmd3
->body
.mobid
= SVGA3D_INVALID_ID
;
1298 vmw_fifo_commit(dev_priv
, submit_size
);
1301 * Create a fence object and fence the backup buffer.
1304 (void) vmw_execbuf_fence_commands(NULL
, dev_priv
,
1307 vmw_bo_fence_single(val_buf
->bo
, fence
);
1309 if (likely(fence
!= NULL
))
1310 vmw_fence_obj_unreference(&fence
);
1315 static int vmw_gb_surface_destroy(struct vmw_resource
*res
)
1317 struct vmw_private
*dev_priv
= res
->dev_priv
;
1318 struct vmw_surface
*srf
= vmw_res_to_srf(res
);
1320 SVGA3dCmdHeader header
;
1321 SVGA3dCmdDestroyGBSurface body
;
1324 if (likely(res
->id
== -1))
1327 mutex_lock(&dev_priv
->binding_mutex
);
1328 vmw_view_surface_list_destroy(dev_priv
, &srf
->view_list
);
1329 vmw_binding_res_list_scrub(&res
->binding_head
);
1331 cmd
= VMW_FIFO_RESERVE(dev_priv
, sizeof(*cmd
));
1332 if (unlikely(!cmd
)) {
1333 mutex_unlock(&dev_priv
->binding_mutex
);
1337 cmd
->header
.id
= SVGA_3D_CMD_DESTROY_GB_SURFACE
;
1338 cmd
->header
.size
= sizeof(cmd
->body
);
1339 cmd
->body
.sid
= res
->id
;
1340 vmw_fifo_commit(dev_priv
, sizeof(*cmd
));
1341 mutex_unlock(&dev_priv
->binding_mutex
);
1342 vmw_resource_release_id(res
);
1343 vmw_fifo_resource_dec(dev_priv
);
	return 0;
}

/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_arg *arg =
	    (union drm_vmw_gb_surface_create_arg *)data;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_create_ext_req req_ext;

	req_ext.base = arg->req;
	req_ext.version = drm_vmw_gb_surface_v1;
	req_ext.svga3d_flags_upper_32_bits = 0;
	req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
	req_ext.buffer_byte_stride = 0;
	req_ext.must_be_zero = 0;

	return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
}

/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_arg *arg =
	    (union drm_vmw_gb_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
	int ret;

	ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);

	if (unlikely(ret != 0))
		return ret;

	rep->creq = rep_ext.creq.base;
	rep->crep = rep_ext.crep;

	return ret;
}

/**
 * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_ext_arg *arg =
	    (union drm_vmw_gb_surface_create_ext_arg *)data;
	struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;

	return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
}

/**
 * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_ext_arg *arg =
	    (union drm_vmw_gb_surface_reference_ext_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;

	return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
}
1443 * vmw_gb_surface_define_internal - Ioctl function implementing
1444 * the user surface define functionality.
1446 * @dev: Pointer to a struct drm_device.
1447 * @req: Request argument from user-space.
1448 * @rep: Response argument to user-space.
1449 * @file_priv: Pointer to a drm file private structure.
1452 vmw_gb_surface_define_internal(struct drm_device
*dev
,
1453 struct drm_vmw_gb_surface_create_ext_req
*req
,
1454 struct drm_vmw_gb_surface_create_rep
*rep
,
1455 struct drm_file
*file_priv
)
1457 struct ttm_object_file
*tfile
= vmw_fpriv(file_priv
)->tfile
;
1458 struct vmw_private
*dev_priv
= vmw_priv(dev
);
1459 struct vmw_user_surface
*user_srf
;
1460 struct vmw_surface_metadata metadata
= {0};
1461 struct vmw_surface
*srf
;
1462 struct vmw_resource
*res
;
1463 struct vmw_resource
*tmp
;
1466 uint32_t backup_handle
= 0;
1467 SVGA3dSurfaceAllFlags svga3d_flags_64
=
1468 SVGA3D_FLAGS_64(req
->svga3d_flags_upper_32_bits
,
1469 req
->base
.svga3d_flags
);
	/* array_size must be zero for non-GL3 hosts. */
1472 if (req
->base
.array_size
> 0 && !has_sm4_context(dev_priv
)) {
1473 VMW_DEBUG_USER("SM4 surface not supported.\n");
1477 if (!has_sm4_1_context(dev_priv
)) {
1478 if (req
->svga3d_flags_upper_32_bits
!= 0)
1481 if (req
->base
.multisample_count
!= 0)
1484 if (req
->multisample_pattern
!= SVGA3D_MS_PATTERN_NONE
)
1487 if (req
->quality_level
!= SVGA3D_MS_QUALITY_NONE
)
1491 VMW_DEBUG_USER("SM4.1 surface not supported.\n");
1496 if (req
->buffer_byte_stride
> 0 && !has_sm5_context(dev_priv
)) {
1497 VMW_DEBUG_USER("SM5 surface not supported.\n");
1501 if ((svga3d_flags_64
& SVGA3D_SURFACE_MULTISAMPLE
) &&
1502 req
->base
.multisample_count
== 0) {
1503 VMW_DEBUG_USER("Invalid sample count.\n");
1507 if (req
->base
.mip_levels
> DRM_VMW_MAX_MIP_LEVELS
) {
1508 VMW_DEBUG_USER("Invalid mip level.\n");
1512 if (unlikely(vmw_user_surface_size
== 0))
1513 vmw_user_surface_size
= ttm_round_pot(sizeof(*user_srf
)) +
1514 VMW_IDA_ACC_SIZE
+ TTM_OBJ_EXTRA_SIZE
;
1516 size
= vmw_user_surface_size
;
1518 metadata
.flags
= svga3d_flags_64
;
1519 metadata
.format
= req
->base
.format
;
1520 metadata
.mip_levels
[0] = req
->base
.mip_levels
;
1521 metadata
.multisample_count
= req
->base
.multisample_count
;
1522 metadata
.multisample_pattern
= req
->multisample_pattern
;
1523 metadata
.quality_level
= req
->quality_level
;
1524 metadata
.array_size
= req
->base
.array_size
;
1525 metadata
.buffer_byte_stride
= req
->buffer_byte_stride
;
1526 metadata
.num_sizes
= 1;
1527 metadata
.base_size
= req
->base
.base_size
;
1528 metadata
.scanout
= req
->base
.drm_surface_flags
&
1529 drm_vmw_surface_flag_scanout
;
1531 /* Define a surface based on the parameters. */
1532 ret
= vmw_gb_surface_define(dev_priv
, size
, &metadata
, &srf
);
1534 VMW_DEBUG_USER("Failed to define surface.\n");
1538 user_srf
= container_of(srf
, struct vmw_user_surface
, srf
);
1539 if (drm_is_primary_client(file_priv
))
1540 user_srf
->master
= drm_master_get(file_priv
->master
);
1542 ret
= ttm_read_lock(&dev_priv
->reservation_sem
, true);
1543 if (unlikely(ret
!= 0))
1546 res
= &user_srf
->srf
.res
;
1548 if (req
->base
.buffer_handle
!= SVGA3D_INVALID_ID
) {
1549 ret
= vmw_user_bo_lookup(tfile
, req
->base
.buffer_handle
,
1551 &user_srf
->backup_base
);
1553 if (res
->backup
->base
.num_pages
* PAGE_SIZE
<
1555 VMW_DEBUG_USER("Surface backup buffer too small.\n");
1556 vmw_bo_unreference(&res
->backup
);
1560 backup_handle
= req
->base
.buffer_handle
;
1563 } else if (req
->base
.drm_surface_flags
&
1564 (drm_vmw_surface_flag_create_buffer
|
1565 drm_vmw_surface_flag_coherent
))
1566 ret
= vmw_user_bo_alloc(dev_priv
, tfile
,
1568 req
->base
.drm_surface_flags
&
1569 drm_vmw_surface_flag_shareable
,
1572 &user_srf
->backup_base
);
1574 if (unlikely(ret
!= 0)) {
1575 vmw_resource_unreference(&res
);
1579 if (req
->base
.drm_surface_flags
& drm_vmw_surface_flag_coherent
) {
1580 struct vmw_buffer_object
*backup
= res
->backup
;
1582 ttm_bo_reserve(&backup
->base
, false, false, NULL
);
1583 if (!res
->func
->dirty_alloc
)
1586 ret
= vmw_bo_dirty_add(backup
);
1588 res
->coherent
= true;
1589 ret
= res
->func
->dirty_alloc(res
);
1591 ttm_bo_unreserve(&backup
->base
);
1593 vmw_resource_unreference(&res
);
1599 tmp
= vmw_resource_reference(res
);
1600 ret
= ttm_prime_object_init(tfile
, res
->backup_size
, &user_srf
->prime
,
1601 req
->base
.drm_surface_flags
&
1602 drm_vmw_surface_flag_shareable
,
1604 &vmw_user_surface_base_release
, NULL
);
1606 if (unlikely(ret
!= 0)) {
1607 vmw_resource_unreference(&tmp
);
1608 vmw_resource_unreference(&res
);
1612 rep
->handle
= user_srf
->prime
.base
.handle
;
1613 rep
->backup_size
= res
->backup_size
;
1615 rep
->buffer_map_handle
=
1616 drm_vma_node_offset_addr(&res
->backup
->base
.base
.vma_node
);
1617 rep
->buffer_size
= res
->backup
->base
.num_pages
* PAGE_SIZE
;
1618 rep
->buffer_handle
= backup_handle
;
1620 rep
->buffer_map_handle
= 0;
1621 rep
->buffer_size
= 0;
1622 rep
->buffer_handle
= SVGA3D_INVALID_ID
;
1625 vmw_resource_unreference(&res
);
1628 ttm_read_unlock(&dev_priv
->reservation_sem
);
1633 * vmw_gb_surface_reference_internal - Ioctl function implementing
1634 * the user surface reference functionality.
1636 * @dev: Pointer to a struct drm_device.
1637 * @req: Pointer to user-space request surface arg.
1638 * @rep: Pointer to response to user-space.
1639 * @file_priv: Pointer to a drm file private structure.
1642 vmw_gb_surface_reference_internal(struct drm_device
*dev
,
1643 struct drm_vmw_surface_arg
*req
,
1644 struct drm_vmw_gb_surface_ref_ext_rep
*rep
,
1645 struct drm_file
*file_priv
)
1647 struct vmw_private
*dev_priv
= vmw_priv(dev
);
1648 struct ttm_object_file
*tfile
= vmw_fpriv(file_priv
)->tfile
;
1649 struct vmw_surface
*srf
;
1650 struct vmw_user_surface
*user_srf
;
1651 struct vmw_surface_metadata
*metadata
;
1652 struct ttm_base_object
*base
;
1653 uint32_t backup_handle
;
1656 ret
= vmw_surface_handle_reference(dev_priv
, file_priv
, req
->sid
,
1657 req
->handle_type
, &base
);
1658 if (unlikely(ret
!= 0))
1661 user_srf
= container_of(base
, struct vmw_user_surface
, prime
.base
);
1662 srf
= &user_srf
->srf
;
1663 if (!srf
->res
.backup
) {
1664 DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
1665 goto out_bad_resource
;
1667 metadata
= &srf
->metadata
;
1669 mutex_lock(&dev_priv
->cmdbuf_mutex
); /* Protect res->backup */
1670 ret
= vmw_user_bo_reference(tfile
, srf
->res
.backup
, &backup_handle
);
1671 mutex_unlock(&dev_priv
->cmdbuf_mutex
);
1673 if (unlikely(ret
!= 0)) {
1674 DRM_ERROR("Could not add a reference to a GB surface "
1675 "backup buffer.\n");
1676 (void) ttm_ref_object_base_unref(tfile
, base
->handle
,
1678 goto out_bad_resource
;
1681 rep
->creq
.base
.svga3d_flags
= SVGA3D_FLAGS_LOWER_32(metadata
->flags
);
1682 rep
->creq
.base
.format
= metadata
->format
;
1683 rep
->creq
.base
.mip_levels
= metadata
->mip_levels
[0];
1684 rep
->creq
.base
.drm_surface_flags
= 0;
1685 rep
->creq
.base
.multisample_count
= metadata
->multisample_count
;
1686 rep
->creq
.base
.autogen_filter
= metadata
->autogen_filter
;
1687 rep
->creq
.base
.array_size
= metadata
->array_size
;
1688 rep
->creq
.base
.buffer_handle
= backup_handle
;
1689 rep
->creq
.base
.base_size
= metadata
->base_size
;
1690 rep
->crep
.handle
= user_srf
->prime
.base
.handle
;
1691 rep
->crep
.backup_size
= srf
->res
.backup_size
;
1692 rep
->crep
.buffer_handle
= backup_handle
;
1693 rep
->crep
.buffer_map_handle
=
1694 drm_vma_node_offset_addr(&srf
->res
.backup
->base
.base
.vma_node
);
1695 rep
->crep
.buffer_size
= srf
->res
.backup
->base
.num_pages
* PAGE_SIZE
;
1697 rep
->creq
.version
= drm_vmw_gb_surface_v1
;
1698 rep
->creq
.svga3d_flags_upper_32_bits
=
1699 SVGA3D_FLAGS_UPPER_32(metadata
->flags
);
1700 rep
->creq
.multisample_pattern
= metadata
->multisample_pattern
;
1701 rep
->creq
.quality_level
= metadata
->quality_level
;
1702 rep
->creq
.must_be_zero
= 0;
1705 ttm_base_object_unref(&base
);
1711 * vmw_subres_dirty_add - Add a dirty region to a subresource
 * @dirty: The surface's dirty tracker.
1713 * @loc_start: The location corresponding to the start of the region.
1714 * @loc_end: The location corresponding to the end of the region.
1716 * As we are assuming that @loc_start and @loc_end represent a sequential
1717 * range of backing store memory, if the region spans multiple lines then
1718 * regardless of the x coordinate, the full lines are dirtied.
1719 * Correspondingly if the region spans multiple z slices, then full rather
1720 * than partial z slices are dirtied.
1722 static void vmw_subres_dirty_add(struct vmw_surface_dirty
*dirty
,
1723 const struct svga3dsurface_loc
*loc_start
,
1724 const struct svga3dsurface_loc
*loc_end
)
1726 const struct svga3dsurface_cache
*cache
= &dirty
->cache
;
1727 SVGA3dBox
*box
= &dirty
->boxes
[loc_start
->sub_resource
];
1728 u32 mip
= loc_start
->sub_resource
% cache
->num_mip_levels
;
1729 const struct drm_vmw_size
*size
= &cache
->mip
[mip
].size
;
1730 u32 box_c2
= box
->z
+ box
->d
;
1732 if (WARN_ON(loc_start
->sub_resource
>= dirty
->num_subres
))
1735 if (box
->d
== 0 || box
->z
> loc_start
->z
)
1736 box
->z
= loc_start
->z
;
1737 if (box_c2
< loc_end
->z
)
1738 box
->d
= loc_end
->z
- box
->z
;
1740 if (loc_start
->z
+ 1 == loc_end
->z
) {
1741 box_c2
= box
->y
+ box
->h
;
1742 if (box
->h
== 0 || box
->y
> loc_start
->y
)
1743 box
->y
= loc_start
->y
;
1744 if (box_c2
< loc_end
->y
)
1745 box
->h
= loc_end
->y
- box
->y
;
1747 if (loc_start
->y
+ 1 == loc_end
->y
) {
1748 box_c2
= box
->x
+ box
->w
;
1749 if (box
->w
== 0 || box
->x
> loc_start
->x
)
1750 box
->x
= loc_start
->x
;
1751 if (box_c2
< loc_end
->x
)
1752 box
->w
= loc_end
->x
- box
->x
;
1755 box
->w
= size
->width
;
1759 box
->h
= size
->height
;
1761 box
->w
= size
->width
;

/**
 * vmw_subres_dirty_full - Mark a full subresource as dirty
 * @dirty: The surface's dirty tracker.
 * @subres: The subresource
 */
static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)
{
	const struct svga3dsurface_cache *cache = &dirty->cache;
	u32 mip = subres % cache->num_mip_levels;
	const struct drm_vmw_size *size = &cache->mip[mip].size;
	SVGA3dBox *box = &dirty->boxes[subres];

	box->x = 0;
	box->y = 0;
	box->z = 0;
	box->w = size->width;
	box->h = size->height;
	box->d = size->depth;
}

/**
 * vmw_surface_tex_dirty_range_add - The dirty_range_add callback for texture
 * surfaces.
 */
1789 static void vmw_surface_tex_dirty_range_add(struct vmw_resource
*res
,
1790 size_t start
, size_t end
)
1792 struct vmw_surface_dirty
*dirty
=
1793 (struct vmw_surface_dirty
*) res
->dirty
;
1794 size_t backup_end
= res
->backup_offset
+ res
->backup_size
;
1795 struct svga3dsurface_loc loc1
, loc2
;
1796 const struct svga3dsurface_cache
*cache
;
1798 start
= max_t(size_t, start
, res
->backup_offset
) - res
->backup_offset
;
1799 end
= min(end
, backup_end
) - res
->backup_offset
;
1800 cache
= &dirty
->cache
;
1801 svga3dsurface_get_loc(cache
, &loc1
, start
);
1802 svga3dsurface_get_loc(cache
, &loc2
, end
- 1);
1803 svga3dsurface_inc_loc(cache
, &loc2
);
1805 if (loc1
.sub_resource
+ 1 == loc2
.sub_resource
) {
1806 /* Dirty range covers a single sub-resource */
1807 vmw_subres_dirty_add(dirty
, &loc1
, &loc2
);
1809 /* Dirty range covers multiple sub-resources */
1810 struct svga3dsurface_loc loc_min
, loc_max
;
1813 svga3dsurface_max_loc(cache
, loc1
.sub_resource
, &loc_max
);
1814 vmw_subres_dirty_add(dirty
, &loc1
, &loc_max
);
1815 svga3dsurface_min_loc(cache
, loc2
.sub_resource
- 1, &loc_min
);
1816 vmw_subres_dirty_add(dirty
, &loc_min
, &loc2
);
1817 for (sub_res
= loc1
.sub_resource
+ 1;
1818 sub_res
< loc2
.sub_resource
- 1; ++sub_res
)
1819 vmw_subres_dirty_full(dirty
, sub_res
);

/**
 * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer
 * surfaces.
 */
1827 static void vmw_surface_buf_dirty_range_add(struct vmw_resource
*res
,
1828 size_t start
, size_t end
)
1830 struct vmw_surface_dirty
*dirty
=
1831 (struct vmw_surface_dirty
*) res
->dirty
;
1832 const struct svga3dsurface_cache
*cache
= &dirty
->cache
;
1833 size_t backup_end
= res
->backup_offset
+ cache
->mip_chain_bytes
;
1834 SVGA3dBox
*box
= &dirty
->boxes
[0];
1837 box
->h
= box
->d
= 1;
1838 start
= max_t(size_t, start
, res
->backup_offset
) - res
->backup_offset
;
1839 end
= min(end
, backup_end
) - res
->backup_offset
;
1840 box_c2
= box
->x
+ box
->w
;
1841 if (box
->w
== 0 || box
->x
> start
)
1844 box
->w
= end
- box
->x
;

/**
 * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces.
 */
1850 static void vmw_surface_dirty_range_add(struct vmw_resource
*res
, size_t start
,
1853 struct vmw_surface
*srf
= vmw_res_to_srf(res
);
1855 if (WARN_ON(end
<= res
->backup_offset
||
1856 start
>= res
->backup_offset
+ res
->backup_size
))
1859 if (srf
->metadata
.format
== SVGA3D_BUFFER
)
1860 vmw_surface_buf_dirty_range_add(res
, start
, end
);
1862 vmw_surface_tex_dirty_range_add(res
, start
, end
);
1866 * vmw_surface_dirty_sync - The surface's dirty_sync callback.
1868 static int vmw_surface_dirty_sync(struct vmw_resource
*res
)
1870 struct vmw_private
*dev_priv
= res
->dev_priv
;
1873 struct vmw_surface_dirty
*dirty
=
1874 (struct vmw_surface_dirty
*) res
->dirty
;
1876 const struct svga3dsurface_cache
*cache
= &dirty
->cache
;
1878 SVGA3dCmdHeader header
;
1879 SVGA3dCmdDXUpdateSubResource body
;
1882 SVGA3dCmdHeader header
;
1883 SVGA3dCmdUpdateGBImage body
;
1888 for (i
= 0; i
< dirty
->num_subres
; ++i
) {
1889 const SVGA3dBox
*box
= &dirty
->boxes
[i
];
1898 alloc_size
= num_dirty
* ((has_dx
) ? sizeof(*cmd1
) : sizeof(*cmd2
));
1899 cmd
= VMW_FIFO_RESERVE(dev_priv
, alloc_size
);
1906 for (i
= 0; i
< dirty
->num_subres
; ++i
) {
1907 const SVGA3dBox
*box
= &dirty
->boxes
[i
];
1913 * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
1914 * UPDATE_GB_IMAGE is not.
1917 cmd1
->header
.id
= SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE
;
1918 cmd1
->header
.size
= sizeof(cmd1
->body
);
1919 cmd1
->body
.sid
= res
->id
;
1920 cmd1
->body
.subResource
= i
;
1921 cmd1
->body
.box
= *box
;
1924 cmd2
->header
.id
= SVGA_3D_CMD_UPDATE_GB_IMAGE
;
1925 cmd2
->header
.size
= sizeof(cmd2
->body
);
1926 cmd2
->body
.image
.sid
= res
->id
;
1927 cmd2
->body
.image
.face
= i
/ cache
->num_mip_levels
;
1928 cmd2
->body
.image
.mipmap
= i
-
1929 (cache
->num_mip_levels
* cmd2
->body
.image
.face
);
1930 cmd2
->body
.box
= *box
;
1935 vmw_fifo_commit(dev_priv
, alloc_size
);
1937 memset(&dirty
->boxes
[0], 0, sizeof(dirty
->boxes
[0]) *
1944 * vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
1946 static int vmw_surface_dirty_alloc(struct vmw_resource
*res
)
1948 struct vmw_surface
*srf
= vmw_res_to_srf(res
);
1949 const struct vmw_surface_metadata
*metadata
= &srf
->metadata
;
1950 struct vmw_surface_dirty
*dirty
;
1955 size_t dirty_size
, acc_size
;
1956 static struct ttm_operation_ctx ctx
= {
1957 .interruptible
= false,
1958 .no_wait_gpu
= false
1962 if (metadata
->array_size
)
1963 num_layers
= metadata
->array_size
;
1964 else if (metadata
->flags
& SVGA3D_SURFACE_CUBEMAP
)
1965 num_layers
*= SVGA3D_MAX_SURFACE_FACES
;
1967 num_mip
= metadata
->mip_levels
[0];
1971 num_subres
= num_layers
* num_mip
;
1972 dirty_size
= struct_size(dirty
, boxes
, num_subres
);
1973 acc_size
= ttm_round_pot(dirty_size
);
1974 ret
= ttm_mem_global_alloc(vmw_mem_glob(res
->dev_priv
),
1977 VMW_DEBUG_USER("Out of graphics memory for surface "
1978 "dirty tracker.\n");
1982 dirty
= kvzalloc(dirty_size
, GFP_KERNEL
);
1988 num_samples
= max_t(u32
, 1, metadata
->multisample_count
);
1989 ret
= svga3dsurface_setup_cache(&metadata
->base_size
, metadata
->format
,
1990 num_mip
, num_layers
, num_samples
,
1995 dirty
->num_subres
= num_subres
;
1996 dirty
->size
= acc_size
;
1997 res
->dirty
= (struct vmw_resource_dirty
*) dirty
;
2004 ttm_mem_global_free(vmw_mem_glob(res
->dev_priv
), acc_size
);

/**
 * vmw_surface_dirty_free - The surface's dirty_free callback
 */
static void vmw_surface_dirty_free(struct vmw_resource *res)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	size_t acc_size = dirty->size;

	kvfree(dirty);
	res->dirty = NULL;

	ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
}

/**
 * vmw_surface_clean - The surface's clean callback
 */
static int vmw_surface_clean(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	size_t alloc_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	alloc_size = sizeof(*cmd);
	cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_fifo_commit(dev_priv, alloc_size);

	return 0;
}
2048 * vmw_gb_surface_define - Define a private GB surface
2050 * @dev_priv: Pointer to a device private.
2051 * @user_accounting_size: Used to track user-space memory usage, set
2052 * to 0 for kernel mode only memory
 * @req: The metadata representing the surface to create.
 * @srf_out: Allocated user surface; set to NULL on failure.
2056 * GB surfaces allocated by this function will not have a user mode handle, and
2057 * thus will only be visible to vmwgfx. For optimization reasons the
2058 * surface may later be given a user mode handle by another function to make
2059 * it available to user mode drivers.
2061 int vmw_gb_surface_define(struct vmw_private
*dev_priv
,
2062 uint32_t user_accounting_size
,
2063 const struct vmw_surface_metadata
*req
,
2064 struct vmw_surface
**srf_out
)
2066 struct vmw_surface_metadata
*metadata
;
2067 struct vmw_user_surface
*user_srf
;
2068 struct vmw_surface
*srf
;
2069 struct ttm_operation_ctx ctx
= {
2070 .interruptible
= true,
2071 .no_wait_gpu
= false
2073 u32 sample_count
= 1;
2080 if (!svga3dsurface_is_screen_target_format(req
->format
)) {
2081 VMW_DEBUG_USER("Invalid Screen Target surface format.");
2085 if (req
->base_size
.width
> dev_priv
->texture_max_width
||
2086 req
->base_size
.height
> dev_priv
->texture_max_height
) {
		VMW_DEBUG_USER("%ux%u, exceeds max surface size %ux%u\n",
2088 req
->base_size
.width
,
2089 req
->base_size
.height
,
2090 dev_priv
->texture_max_width
,
2091 dev_priv
->texture_max_height
);
2095 const struct svga3d_surface_desc
*desc
=
2096 svga3dsurface_get_desc(req
->format
);
2098 if (desc
->block_desc
== SVGA3DBLOCKDESC_NONE
) {
2099 VMW_DEBUG_USER("Invalid surface format.\n");
2104 if (req
->autogen_filter
!= SVGA3D_TEX_FILTER_NONE
)
2107 if (req
->num_sizes
!= 1)
2110 if (req
->sizes
!= NULL
)
2113 ret
= ttm_read_lock(&dev_priv
->reservation_sem
, true);
2114 if (unlikely(ret
!= 0))
2117 ret
= ttm_mem_global_alloc(vmw_mem_glob(dev_priv
),
2118 user_accounting_size
, &ctx
);
2120 if (ret
!= -ERESTARTSYS
)
2121 DRM_ERROR("Out of graphics memory for surface.\n");
2125 user_srf
= kzalloc(sizeof(*user_srf
), GFP_KERNEL
);
2126 if (unlikely(!user_srf
)) {
2128 goto out_no_user_srf
;
2131 *srf_out
= &user_srf
->srf
;
2132 user_srf
->size
= user_accounting_size
;
2133 user_srf
->prime
.base
.shareable
= false;
2134 user_srf
->prime
.base
.tfile
= NULL
;
2136 srf
= &user_srf
->srf
;
2137 srf
->metadata
= *req
;
2138 srf
->offsets
= NULL
;
2140 metadata
= &srf
->metadata
;
2142 if (metadata
->array_size
)
2143 num_layers
= req
->array_size
;
2144 else if (metadata
->flags
& SVGA3D_SURFACE_CUBEMAP
)
2145 num_layers
= SVGA3D_MAX_SURFACE_FACES
;
2147 if (metadata
->flags
& SVGA3D_SURFACE_MULTISAMPLE
)
2148 sample_count
= metadata
->multisample_count
;
2150 srf
->res
.backup_size
=
2151 svga3dsurface_get_serialized_size_extended(metadata
->format
,
2152 metadata
->base_size
,
2153 metadata
->mip_levels
[0],
2157 if (metadata
->flags
& SVGA3D_SURFACE_BIND_STREAM_OUTPUT
)
2158 srf
->res
.backup_size
+= sizeof(SVGA3dDXSOState
);
2161 * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
2162 * size greater than STDU max width/height. This is really a workaround
2163 * to support creation of big framebuffer requested by some user-space
2164 * for whole topology. That big framebuffer won't really be used for
2165 * binding with screen target as during prepare_fb a separate surface is
2166 * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
2168 if (dev_priv
->active_display_unit
== vmw_du_screen_target
&&
2169 metadata
->scanout
&&
2170 metadata
->base_size
.width
<= dev_priv
->stdu_max_width
&&
2171 metadata
->base_size
.height
<= dev_priv
->stdu_max_height
)
2172 metadata
->flags
|= SVGA3D_SURFACE_SCREENTARGET
;
2175 * From this point, the generic resource management functions
2176 * destroy the object on failure.
2178 ret
= vmw_surface_init(dev_priv
, srf
, vmw_user_surface_free
);
2180 ttm_read_unlock(&dev_priv
->reservation_sem
);
2184 ttm_mem_global_free(vmw_mem_glob(dev_priv
), user_accounting_size
);
2187 ttm_read_unlock(&dev_priv
->reservation_sem
);