/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"
struct vmw_shader {
	struct vmw_resource res;
	SVGA3dShaderType type;
	uint32_t size;
};

struct vmw_user_shader {
	struct ttm_base_object base;
	struct vmw_shader shader;
};

static uint64_t vmw_user_shader_size;
static uint64_t vmw_shader_size;
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);

static int vmw_gb_shader_create(struct vmw_resource *res);
static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);
static const struct vmw_user_resource_conv user_shader_conv = {
	.object_type = VMW_RES_SHADER,
	.base_obj_to_res = vmw_user_shader_base_to_res,
	.res_free = vmw_user_shader_free
};

const struct vmw_user_resource_conv *user_shader_converter =
	&user_shader_conv;
static const struct vmw_res_func vmw_gb_shader_func = {
	.res_type = vmw_res_shader,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed shaders",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_shader_create,
	.destroy = vmw_gb_shader_destroy,
	.bind = vmw_gb_shader_bind,
	.unbind = vmw_gb_shader_unbind
};
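
/*
 * The vmw_res_func callbacks above split the shader lifecycle in two:
 * create/destroy manage the device-side shader id with
 * SVGA_3D_CMD_DEFINE_GB_SHADER / SVGA_3D_CMD_DESTROY_GB_SHADER, while
 * bind/unbind point the shader at, or away from, the MOB holding its
 * backup buffer using SVGA_3D_CMD_BIND_GB_SHADER.
 */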
static inline struct vmw_shader *
vmw_res_to_shader(struct vmw_resource *res)
{
	return container_of(res, struct vmw_shader, res);
}
static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
	(void) vmw_gb_shader_destroy(res);
}
static int vmw_gb_shader_init(struct vmw_private *dev_priv,
			      struct vmw_resource *res,
			      uint32_t size,
			      uint64_t offset,
			      SVGA3dShaderType type,
			      struct vmw_dma_buffer *byte_code,
			      void (*res_free) (struct vmw_resource *res))
{
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_shader_func);

	if (unlikely(ret != 0)) {
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	res->backup_size = size;
	if (byte_code) {
		res->backup = vmw_dmabuf_reference(byte_code);
		res->backup_offset = offset;
	}
	shader->size = size;
	shader->type = type;

	vmw_resource_activate(res, vmw_hw_shader_destroy);
	return 0;
}
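
/*
 * Note that if vmw_resource_init() fails above, the hardware destroy
 * callback has not been activated yet, so the resource is freed directly
 * (via res_free or kfree). Once vmw_resource_activate() has run, teardown
 * goes through the normal reference-counted path and ends up in
 * vmw_hw_shader_destroy() and the res_free destructor instead.
 */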
static int vmw_gb_shader_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBShader body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a shader id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.type = shader->type;
	cmd->body.sizeInBytes = shader->size;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
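
/*
 * vmw_gb_shader_create() and the bind, unbind and destroy helpers below
 * all use the same FIFO pattern: reserve command space with
 * vmw_fifo_reserve(), fill in an SVGA3dCmdHeader plus command body in
 * place, then submit with vmw_fifo_commit(). A NULL return from
 * vmw_fifo_reserve() means no command was queued.
 */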
static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.offsetInBytes = res->backup_offset;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct vmw_fence_obj *fence;

	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.mobid = SVGA3D_INVALID_ID;
	cmd->body.offsetInBytes = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
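
/*
 * The fence attached to val_buf->bo above keeps TTM from moving or
 * reusing the backing MOB until the device has processed the unbind
 * command, since the hardware may still reference the buffer when
 * vmw_fifo_commit() returns.
 */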
static int vmw_gb_shader_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBShader body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_res_list_scrub(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "destruction.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}
/**
 * User-space shader management:
 */

static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_shader, base)->
		 shader.res);
}
static void vmw_user_shader_free(struct vmw_resource *res)
{
	struct vmw_user_shader *ushader =
		container_of(res, struct vmw_user_shader, shader.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ushader, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_shader_size);
}
static void vmw_shader_free(struct vmw_resource *res)
{
	struct vmw_shader *shader = vmw_res_to_shader(res);
	struct vmw_private *dev_priv = res->dev_priv;

	kfree(shader);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_shader_size);
}
/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_resource *res = vmw_user_shader_base_to_res(base);

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->handle,
					 TTM_REF_USAGE);
}
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
				 struct vmw_dma_buffer *buffer,
				 size_t shader_size,
				 size_t offset,
				 SVGA3dShaderType shader_type,
				 struct ttm_object_file *tfile,
				 u32 *handle)
{
	struct vmw_user_shader *ushader;
	struct vmw_resource *res, *tmp;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of shaders anyway.
	 */
	if (unlikely(vmw_user_shader_size == 0))
		vmw_user_shader_size =
			ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_shader_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for shader "
				  "creation.\n");
		goto out;
	}

	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
	if (unlikely(ushader == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_shader_size);
		ret = -ENOMEM;
		goto out;
	}

	res = &ushader->shader.res;
	ushader->base.shareable = false;
	ushader->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
				 offset, shader_type, buffer,
				 vmw_user_shader_free);
	if (unlikely(ret != 0))
		goto out;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &ushader->base, false,
				   VMW_RES_SHADER,
				   &vmw_user_shader_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	if (handle)
		*handle = ushader->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out:
	return ret;
}
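
/*
 * Reference handling in vmw_user_shader_alloc(): vmw_gb_shader_init()
 * leaves the resource with one reference, tmp takes a second one that is
 * effectively owned by the new ttm base object, and the final
 * vmw_resource_unreference(&res) drops the local one. The remaining
 * reference is released through vmw_user_shader_base_release() when user
 * space drops its handle.
 */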
struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
				      struct vmw_dma_buffer *buffer,
				      size_t shader_size,
				      size_t offset,
				      SVGA3dShaderType shader_type)
{
	struct vmw_shader *shader;
	struct vmw_resource *res;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of shaders anyway.
	 */
	if (unlikely(vmw_shader_size == 0))
		vmw_shader_size =
			ttm_round_pot(sizeof(struct vmw_shader)) + 128;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_shader_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for shader "
				  "creation.\n");
		goto out_err;
	}

	shader = kzalloc(sizeof(*shader), GFP_KERNEL);
	if (unlikely(shader == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_shader_size);
		ret = -ENOMEM;
		goto out_err;
	}

	res = &shader->res;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */
	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
				 offset, shader_type, buffer,
				 vmw_shader_free);

out_err:
	return ret ? ERR_PTR(ret) : res;
}
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_shader_create_arg *arg =
		(struct drm_vmw_shader_create_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *buffer = NULL;
	SVGA3dShaderType shader_type;
	int ret;

	if (arg->buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
					     &buffer);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not find buffer for shader "
				  "creation.\n");
			return ret;
		}

		if ((u64)buffer->base.num_pages * PAGE_SIZE <
		    (u64)arg->size + (u64)arg->offset) {
			DRM_ERROR("Illegal buffer- or shader size.\n");
			ret = -EINVAL;
			goto out_bad_arg;
		}
	}

	switch (arg->shader_type) {
	case drm_vmw_shader_type_vs:
		shader_type = SVGA3D_SHADERTYPE_VS;
		break;
	case drm_vmw_shader_type_ps:
		shader_type = SVGA3D_SHADERTYPE_PS;
		break;
	case drm_vmw_shader_type_gs:
		shader_type = SVGA3D_SHADERTYPE_GS;
		break;
	default:
		DRM_ERROR("Illegal shader type.\n");
		ret = -EINVAL;
		goto out_bad_arg;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_bad_arg;

	ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
				    shader_type, tfile, &arg->shader_handle);

	ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
	vmw_dmabuf_unreference(&buffer);
	return ret;
}
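
/*
 * Illustration of the size check in vmw_shader_define_ioctl() above: the
 * backing buffer must cover offset + size bytes of bytecode. Assuming
 * 4 KiB pages, a shader of 0x3000 bytes at offset 0x1000 needs a buffer
 * of at least four pages (0x4000 bytes); a three-page buffer would be
 * rejected with -EINVAL before any resource is allocated.
 */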
/**
 * vmw_compat_shader_id_ok - Check whether a compat shader user key and
 * shader type are within valid bounds.
 *
 * @user_key: User space id of the shader.
 * @shader_type: Shader type.
 *
 * Returns true if valid, false if not.
 */
static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
	return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
}
/**
 * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
 *
 * @user_key: User space id of the shader.
 * @shader_type: Shader type.
 *
 * Returns a hash key suitable for a command buffer managed resource
 * manager hash table.
 */
static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
	return user_key | (shader_type << 20);
}
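
/*
 * Worked example: vmw_compat_shader_id_ok() restricts user_key to 20 bits
 * and shader_type to values below 16, so the pair always packs into a
 * unique 32-bit key. With user_key = 5 and a shader_type whose numeric
 * value is 2 (for illustration only):
 *
 *	key = 5 | (2 << 20) = 0x200005
 *
 * Bits 0-19 hold the user key and bits 20 and up hold the shader type.
 */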
/**
 * vmw_compat_shader_remove - Stage a compat shader for removal.
 *
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @shader_type: Shader type.
 * @list: Caller's list of staged command buffer resource actions.
 */
int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list)
{
	if (!vmw_compat_shader_id_ok(user_key, shader_type))
		return -EINVAL;

	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
				     vmw_compat_shader_key(user_key,
							   shader_type),
				     list);
}
/**
 * vmw_compat_shader_add - Create a compat shader and stage it for addition
 * as a command buffer managed resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @bytecode: Pointer to the bytecode of the shader.
 * @shader_type: Shader type.
 * @size: Size of the shader bytecode, in bytes.
 * @list: Caller's list of staged command buffer resource actions.
 */
int vmw_compat_shader_add(struct vmw_private *dev_priv,
			  struct vmw_cmdbuf_res_manager *man,
			  u32 user_key, const void *bytecode,
			  SVGA3dShaderType shader_type,
			  size_t size,
			  struct list_head *list)
{
	struct vmw_dma_buffer *buf;
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	int ret;
	struct vmw_resource *res;

	if (!vmw_compat_shader_id_ok(user_key, shader_type))
		return -EINVAL;

	/* Allocate and pin a DMA buffer */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (unlikely(buf == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
			      true, vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out;

	ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
	if (unlikely(ret != 0))
		goto no_reserve;

	/* Map and copy shader bytecode. */
	ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
			  &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(&buf->base);
		goto no_reserve;
	}

	memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
	WARN_ON(is_iomem);

	ttm_bo_kunmap(&map);
	ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
	WARN_ON(ret != 0);
	ttm_bo_unreserve(&buf->base);

	res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto no_reserve;
	}

	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
				 vmw_compat_shader_key(user_key, shader_type),
				 res, list);
	vmw_resource_unreference(&res);
no_reserve:
	vmw_dmabuf_unreference(&buf);
out:
	return ret;
}
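
/*
 * Typical usage: a caller builds up @list across several
 * vmw_compat_shader_add() / vmw_compat_shader_remove() calls while
 * translating a command stream, and then commits or reverts the staged
 * actions as a whole through the command buffer managed resource manager
 * (see vmwgfx_cmdbuf_res.c), so that a failed command stream does not
 * leave half-applied shader updates behind.
 */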
/**
 * vmw_compat_shader_lookup - Look up a compat shader
 *
 * @man: Pointer to the command buffer managed resource manager identifying
 * the shader namespace.
 * @user_key: The user space id of the shader.
 * @shader_type: The shader type.
 *
 * Returns a refcounted pointer to a struct vmw_resource if the shader was
 * found. An error pointer otherwise.
 */
struct vmw_resource *
vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
			 u32 user_key,
			 SVGA3dShaderType shader_type)
{
	if (!vmw_compat_shader_id_ok(user_key, shader_type))
		return ERR_PTR(-EINVAL);

	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
				     vmw_compat_shader_key(user_key,
							   shader_type));
}