1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
4 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include <drm/ttm/ttm_placement.h>
30 #include "vmwgfx_drv.h"
31 #include "vmwgfx_resource_priv.h"
32 #include "vmwgfx_binding.h"
35 struct vmw_resource res
;
36 SVGA3dShaderType type
;
38 uint8_t num_input_sig
;
39 uint8_t num_output_sig
;
42 struct vmw_user_shader
{
43 struct ttm_base_object base
;
44 struct vmw_shader shader
;
47 struct vmw_dx_shader
{
48 struct vmw_resource res
;
49 struct vmw_resource
*ctx
;
50 struct vmw_resource
*cotable
;
53 struct list_head cotable_head
;
56 static uint64_t vmw_user_shader_size
;
57 static uint64_t vmw_shader_size
;
58 static size_t vmw_shader_dx_size
;
60 static void vmw_user_shader_free(struct vmw_resource
*res
);
61 static struct vmw_resource
*
62 vmw_user_shader_base_to_res(struct ttm_base_object
*base
);
64 static int vmw_gb_shader_create(struct vmw_resource
*res
);
65 static int vmw_gb_shader_bind(struct vmw_resource
*res
,
66 struct ttm_validate_buffer
*val_buf
);
67 static int vmw_gb_shader_unbind(struct vmw_resource
*res
,
69 struct ttm_validate_buffer
*val_buf
);
70 static int vmw_gb_shader_destroy(struct vmw_resource
*res
);
72 static int vmw_dx_shader_create(struct vmw_resource
*res
);
73 static int vmw_dx_shader_bind(struct vmw_resource
*res
,
74 struct ttm_validate_buffer
*val_buf
);
75 static int vmw_dx_shader_unbind(struct vmw_resource
*res
,
77 struct ttm_validate_buffer
*val_buf
);
78 static void vmw_dx_shader_commit_notify(struct vmw_resource
*res
,
79 enum vmw_cmdbuf_res_state state
);
80 static bool vmw_shader_id_ok(u32 user_key
, SVGA3dShaderType shader_type
);
81 static u32
vmw_shader_key(u32 user_key
, SVGA3dShaderType shader_type
);
82 static uint64_t vmw_user_shader_size
;
84 static const struct vmw_user_resource_conv user_shader_conv
= {
85 .object_type
= VMW_RES_SHADER
,
86 .base_obj_to_res
= vmw_user_shader_base_to_res
,
87 .res_free
= vmw_user_shader_free
90 const struct vmw_user_resource_conv
*user_shader_converter
=
94 static const struct vmw_res_func vmw_gb_shader_func
= {
95 .res_type
= vmw_res_shader
,
100 .type_name
= "guest backed shaders",
101 .backup_placement
= &vmw_mob_placement
,
102 .create
= vmw_gb_shader_create
,
103 .destroy
= vmw_gb_shader_destroy
,
104 .bind
= vmw_gb_shader_bind
,
105 .unbind
= vmw_gb_shader_unbind
108 static const struct vmw_res_func vmw_dx_shader_func
= {
109 .res_type
= vmw_res_shader
,
110 .needs_backup
= true,
114 .type_name
= "dx shaders",
115 .backup_placement
= &vmw_mob_placement
,
116 .create
= vmw_dx_shader_create
,
118 * The destroy callback is only called with a committed resource on
119 * context destroy, in which case we destroy the cotable anyway,
120 * so there's no need to destroy DX shaders separately.
123 .bind
= vmw_dx_shader_bind
,
124 .unbind
= vmw_dx_shader_unbind
,
125 .commit_notify
= vmw_dx_shader_commit_notify
,
132 static inline struct vmw_shader
*
133 vmw_res_to_shader(struct vmw_resource
*res
)
135 return container_of(res
, struct vmw_shader
, res
);
139 * vmw_res_to_dx_shader - typecast a struct vmw_resource to a
140 * struct vmw_dx_shader
142 * @res: Pointer to the struct vmw_resource.
144 static inline struct vmw_dx_shader
*
145 vmw_res_to_dx_shader(struct vmw_resource
*res
)
147 return container_of(res
, struct vmw_dx_shader
, res
);
150 static void vmw_hw_shader_destroy(struct vmw_resource
*res
)
152 if (likely(res
->func
->destroy
))
153 (void) res
->func
->destroy(res
);
159 static int vmw_gb_shader_init(struct vmw_private
*dev_priv
,
160 struct vmw_resource
*res
,
163 SVGA3dShaderType type
,
164 uint8_t num_input_sig
,
165 uint8_t num_output_sig
,
166 struct vmw_buffer_object
*byte_code
,
167 void (*res_free
) (struct vmw_resource
*res
))
169 struct vmw_shader
*shader
= vmw_res_to_shader(res
);
172 ret
= vmw_resource_init(dev_priv
, res
, true, res_free
,
173 &vmw_gb_shader_func
);
175 if (unlikely(ret
!= 0)) {
183 res
->backup_size
= size
;
185 res
->backup
= vmw_bo_reference(byte_code
);
186 res
->backup_offset
= offset
;
190 shader
->num_input_sig
= num_input_sig
;
191 shader
->num_output_sig
= num_output_sig
;
193 res
->hw_destroy
= vmw_hw_shader_destroy
;
201 static int vmw_gb_shader_create(struct vmw_resource
*res
)
203 struct vmw_private
*dev_priv
= res
->dev_priv
;
204 struct vmw_shader
*shader
= vmw_res_to_shader(res
);
207 SVGA3dCmdHeader header
;
208 SVGA3dCmdDefineGBShader body
;
211 if (likely(res
->id
!= -1))
214 ret
= vmw_resource_alloc_id(res
);
215 if (unlikely(ret
!= 0)) {
216 DRM_ERROR("Failed to allocate a shader id.\n");
220 if (unlikely(res
->id
>= VMWGFX_NUM_GB_SHADER
)) {
225 cmd
= VMW_FIFO_RESERVE(dev_priv
, sizeof(*cmd
));
226 if (unlikely(cmd
== NULL
)) {
231 cmd
->header
.id
= SVGA_3D_CMD_DEFINE_GB_SHADER
;
232 cmd
->header
.size
= sizeof(cmd
->body
);
233 cmd
->body
.shid
= res
->id
;
234 cmd
->body
.type
= shader
->type
;
235 cmd
->body
.sizeInBytes
= shader
->size
;
236 vmw_fifo_commit(dev_priv
, sizeof(*cmd
));
237 vmw_fifo_resource_inc(dev_priv
);
242 vmw_resource_release_id(res
);
247 static int vmw_gb_shader_bind(struct vmw_resource
*res
,
248 struct ttm_validate_buffer
*val_buf
)
250 struct vmw_private
*dev_priv
= res
->dev_priv
;
252 SVGA3dCmdHeader header
;
253 SVGA3dCmdBindGBShader body
;
255 struct ttm_buffer_object
*bo
= val_buf
->bo
;
257 BUG_ON(bo
->mem
.mem_type
!= VMW_PL_MOB
);
259 cmd
= VMW_FIFO_RESERVE(dev_priv
, sizeof(*cmd
));
260 if (unlikely(cmd
== NULL
))
263 cmd
->header
.id
= SVGA_3D_CMD_BIND_GB_SHADER
;
264 cmd
->header
.size
= sizeof(cmd
->body
);
265 cmd
->body
.shid
= res
->id
;
266 cmd
->body
.mobid
= bo
->mem
.start
;
267 cmd
->body
.offsetInBytes
= res
->backup_offset
;
268 res
->backup_dirty
= false;
269 vmw_fifo_commit(dev_priv
, sizeof(*cmd
));
274 static int vmw_gb_shader_unbind(struct vmw_resource
*res
,
276 struct ttm_validate_buffer
*val_buf
)
278 struct vmw_private
*dev_priv
= res
->dev_priv
;
280 SVGA3dCmdHeader header
;
281 SVGA3dCmdBindGBShader body
;
283 struct vmw_fence_obj
*fence
;
285 BUG_ON(res
->backup
->base
.mem
.mem_type
!= VMW_PL_MOB
);
287 cmd
= VMW_FIFO_RESERVE(dev_priv
, sizeof(*cmd
));
288 if (unlikely(cmd
== NULL
))
291 cmd
->header
.id
= SVGA_3D_CMD_BIND_GB_SHADER
;
292 cmd
->header
.size
= sizeof(cmd
->body
);
293 cmd
->body
.shid
= res
->id
;
294 cmd
->body
.mobid
= SVGA3D_INVALID_ID
;
295 cmd
->body
.offsetInBytes
= 0;
296 vmw_fifo_commit(dev_priv
, sizeof(*cmd
));
299 * Create a fence object and fence the backup buffer.
302 (void) vmw_execbuf_fence_commands(NULL
, dev_priv
,
305 vmw_bo_fence_single(val_buf
->bo
, fence
);
307 if (likely(fence
!= NULL
))
308 vmw_fence_obj_unreference(&fence
);
313 static int vmw_gb_shader_destroy(struct vmw_resource
*res
)
315 struct vmw_private
*dev_priv
= res
->dev_priv
;
317 SVGA3dCmdHeader header
;
318 SVGA3dCmdDestroyGBShader body
;
321 if (likely(res
->id
== -1))
324 mutex_lock(&dev_priv
->binding_mutex
);
325 vmw_binding_res_list_scrub(&res
->binding_head
);
327 cmd
= VMW_FIFO_RESERVE(dev_priv
, sizeof(*cmd
));
328 if (unlikely(cmd
== NULL
)) {
329 mutex_unlock(&dev_priv
->binding_mutex
);
333 cmd
->header
.id
= SVGA_3D_CMD_DESTROY_GB_SHADER
;
334 cmd
->header
.size
= sizeof(cmd
->body
);
335 cmd
->body
.shid
= res
->id
;
336 vmw_fifo_commit(dev_priv
, sizeof(*cmd
));
337 mutex_unlock(&dev_priv
->binding_mutex
);
338 vmw_resource_release_id(res
);
339 vmw_fifo_resource_dec(dev_priv
);
349 * vmw_dx_shader_commit_notify - Notify that a shader operation has been
350 * committed to hardware from a user-supplied command stream.
352 * @res: Pointer to the shader resource.
353 * @state: Indicating whether a creation or removal has been committed.
356 static void vmw_dx_shader_commit_notify(struct vmw_resource
*res
,
357 enum vmw_cmdbuf_res_state state
)
359 struct vmw_dx_shader
*shader
= vmw_res_to_dx_shader(res
);
360 struct vmw_private
*dev_priv
= res
->dev_priv
;
362 if (state
== VMW_CMDBUF_RES_ADD
) {
363 mutex_lock(&dev_priv
->binding_mutex
);
364 vmw_cotable_add_resource(shader
->cotable
,
365 &shader
->cotable_head
);
366 shader
->committed
= true;
367 res
->id
= shader
->id
;
368 mutex_unlock(&dev_priv
->binding_mutex
);
370 mutex_lock(&dev_priv
->binding_mutex
);
371 list_del_init(&shader
->cotable_head
);
372 shader
->committed
= false;
374 mutex_unlock(&dev_priv
->binding_mutex
);
379 * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader.
381 * @res: The shader resource
383 * This function reverts a scrub operation.
385 static int vmw_dx_shader_unscrub(struct vmw_resource
*res
)
387 struct vmw_dx_shader
*shader
= vmw_res_to_dx_shader(res
);
388 struct vmw_private
*dev_priv
= res
->dev_priv
;
390 SVGA3dCmdHeader header
;
391 SVGA3dCmdDXBindShader body
;
394 if (!list_empty(&shader
->cotable_head
) || !shader
->committed
)
397 cmd
= VMW_FIFO_RESERVE_DX(dev_priv
, sizeof(*cmd
), shader
->ctx
->id
);
398 if (unlikely(cmd
== NULL
))
401 cmd
->header
.id
= SVGA_3D_CMD_DX_BIND_SHADER
;
402 cmd
->header
.size
= sizeof(cmd
->body
);
403 cmd
->body
.cid
= shader
->ctx
->id
;
404 cmd
->body
.shid
= shader
->id
;
405 cmd
->body
.mobid
= res
->backup
->base
.mem
.start
;
406 cmd
->body
.offsetInBytes
= res
->backup_offset
;
407 vmw_fifo_commit(dev_priv
, sizeof(*cmd
));
409 vmw_cotable_add_resource(shader
->cotable
, &shader
->cotable_head
);
415 * vmw_dx_shader_create - The DX shader create callback
417 * @res: The DX shader resource
419 * The create callback is called as part of resource validation and
420 * makes sure that we unscrub the shader if it's previously been scrubbed.
422 static int vmw_dx_shader_create(struct vmw_resource
*res
)
424 struct vmw_private
*dev_priv
= res
->dev_priv
;
425 struct vmw_dx_shader
*shader
= vmw_res_to_dx_shader(res
);
428 WARN_ON_ONCE(!shader
->committed
);
430 if (vmw_resource_mob_attached(res
)) {
431 mutex_lock(&dev_priv
->binding_mutex
);
432 ret
= vmw_dx_shader_unscrub(res
);
433 mutex_unlock(&dev_priv
->binding_mutex
);
436 res
->id
= shader
->id
;
441 * vmw_dx_shader_bind - The DX shader bind callback
443 * @res: The DX shader resource
444 * @val_buf: Pointer to the validate buffer.
447 static int vmw_dx_shader_bind(struct vmw_resource
*res
,
448 struct ttm_validate_buffer
*val_buf
)
450 struct vmw_private
*dev_priv
= res
->dev_priv
;
451 struct ttm_buffer_object
*bo
= val_buf
->bo
;
453 BUG_ON(bo
->mem
.mem_type
!= VMW_PL_MOB
);
454 mutex_lock(&dev_priv
->binding_mutex
);
455 vmw_dx_shader_unscrub(res
);
456 mutex_unlock(&dev_priv
->binding_mutex
);
462 * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader.
464 * @res: The shader resource
466 * This function unbinds a MOB from the DX shader without requiring the
467 * MOB dma_buffer to be reserved. The driver still considers the MOB bound.
468 * However, once the driver eventually decides to unbind the MOB, it doesn't
469 * need to access the context.
471 static int vmw_dx_shader_scrub(struct vmw_resource
*res
)
473 struct vmw_dx_shader
*shader
= vmw_res_to_dx_shader(res
);
474 struct vmw_private
*dev_priv
= res
->dev_priv
;
476 SVGA3dCmdHeader header
;
477 SVGA3dCmdDXBindShader body
;
480 if (list_empty(&shader
->cotable_head
))
483 WARN_ON_ONCE(!shader
->committed
);
484 cmd
= VMW_FIFO_RESERVE(dev_priv
, sizeof(*cmd
));
485 if (unlikely(cmd
== NULL
))
488 cmd
->header
.id
= SVGA_3D_CMD_DX_BIND_SHADER
;
489 cmd
->header
.size
= sizeof(cmd
->body
);
490 cmd
->body
.cid
= shader
->ctx
->id
;
491 cmd
->body
.shid
= res
->id
;
492 cmd
->body
.mobid
= SVGA3D_INVALID_ID
;
493 cmd
->body
.offsetInBytes
= 0;
494 vmw_fifo_commit(dev_priv
, sizeof(*cmd
));
496 list_del_init(&shader
->cotable_head
);
502 * vmw_dx_shader_unbind - The dx shader unbind callback.
504 * @res: The shader resource
505 * @readback: Whether this is a readback unbind. Currently unused.
506 * @val_buf: MOB buffer information.
508 static int vmw_dx_shader_unbind(struct vmw_resource
*res
,
510 struct ttm_validate_buffer
*val_buf
)
512 struct vmw_private
*dev_priv
= res
->dev_priv
;
513 struct vmw_fence_obj
*fence
;
516 BUG_ON(res
->backup
->base
.mem
.mem_type
!= VMW_PL_MOB
);
518 mutex_lock(&dev_priv
->binding_mutex
);
519 ret
= vmw_dx_shader_scrub(res
);
520 mutex_unlock(&dev_priv
->binding_mutex
);
525 (void) vmw_execbuf_fence_commands(NULL
, dev_priv
,
527 vmw_bo_fence_single(val_buf
->bo
, fence
);
529 if (likely(fence
!= NULL
))
530 vmw_fence_obj_unreference(&fence
);
536 * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for
539 * @dev_priv: Pointer to device private structure.
540 * @list: The list of cotable resources.
541 * @readback: Whether the call was part of a readback unbind.
543 * Scrubs all shader MOBs so that any subsequent shader unbind or shader
544 * destroy operation won't need to swap in the context.
546 void vmw_dx_shader_cotable_list_scrub(struct vmw_private
*dev_priv
,
547 struct list_head
*list
,
550 struct vmw_dx_shader
*entry
, *next
;
552 lockdep_assert_held_once(&dev_priv
->binding_mutex
);
554 list_for_each_entry_safe(entry
, next
, list
, cotable_head
) {
555 WARN_ON(vmw_dx_shader_scrub(&entry
->res
));
557 entry
->committed
= false;
562 * vmw_dx_shader_res_free - The DX shader free callback
564 * @res: The shader resource
566 * Frees the DX shader resource and updates memory accounting.
568 static void vmw_dx_shader_res_free(struct vmw_resource
*res
)
570 struct vmw_private
*dev_priv
= res
->dev_priv
;
571 struct vmw_dx_shader
*shader
= vmw_res_to_dx_shader(res
);
573 vmw_resource_unreference(&shader
->cotable
);
575 ttm_mem_global_free(vmw_mem_glob(dev_priv
), vmw_shader_dx_size
);
579 * vmw_dx_shader_add - Add a shader resource as a command buffer managed
582 * @man: The command buffer resource manager.
583 * @ctx: Pointer to the context resource.
584 * @user_key: The id used for this shader.
585 * @shader_type: The shader type.
586 * @list: The list of staged command buffer managed resources.
588 int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager
*man
,
589 struct vmw_resource
*ctx
,
591 SVGA3dShaderType shader_type
,
592 struct list_head
*list
)
594 struct vmw_dx_shader
*shader
;
595 struct vmw_resource
*res
;
596 struct vmw_private
*dev_priv
= ctx
->dev_priv
;
597 struct ttm_operation_ctx ttm_opt_ctx
= {
598 .interruptible
= true,
603 if (!vmw_shader_dx_size
)
604 vmw_shader_dx_size
= ttm_round_pot(sizeof(*shader
));
606 if (!vmw_shader_id_ok(user_key
, shader_type
))
609 ret
= ttm_mem_global_alloc(vmw_mem_glob(dev_priv
), vmw_shader_dx_size
,
612 if (ret
!= -ERESTARTSYS
)
613 DRM_ERROR("Out of graphics memory for shader "
618 shader
= kmalloc(sizeof(*shader
), GFP_KERNEL
);
620 ttm_mem_global_free(vmw_mem_glob(dev_priv
), vmw_shader_dx_size
);
626 shader
->cotable
= vmw_resource_reference
627 (vmw_context_cotable(ctx
, SVGA_COTABLE_DXSHADER
));
628 shader
->id
= user_key
;
629 shader
->committed
= false;
630 INIT_LIST_HEAD(&shader
->cotable_head
);
631 ret
= vmw_resource_init(dev_priv
, res
, true,
632 vmw_dx_shader_res_free
, &vmw_dx_shader_func
);
634 goto out_resource_init
;
637 * The user_key name-space is not per shader type for DX shaders,
638 * so when hashing, use a single zero shader type.
640 ret
= vmw_cmdbuf_res_add(man
, vmw_cmdbuf_res_shader
,
641 vmw_shader_key(user_key
, 0),
644 goto out_resource_init
;
646 res
->id
= shader
->id
;
647 res
->hw_destroy
= vmw_hw_shader_destroy
;
650 vmw_resource_unreference(&res
);
658 * User-space shader management:
661 static struct vmw_resource
*
662 vmw_user_shader_base_to_res(struct ttm_base_object
*base
)
664 return &(container_of(base
, struct vmw_user_shader
, base
)->
668 static void vmw_user_shader_free(struct vmw_resource
*res
)
670 struct vmw_user_shader
*ushader
=
671 container_of(res
, struct vmw_user_shader
, shader
.res
);
672 struct vmw_private
*dev_priv
= res
->dev_priv
;
674 ttm_base_object_kfree(ushader
, base
);
675 ttm_mem_global_free(vmw_mem_glob(dev_priv
),
676 vmw_user_shader_size
);
679 static void vmw_shader_free(struct vmw_resource
*res
)
681 struct vmw_shader
*shader
= vmw_res_to_shader(res
);
682 struct vmw_private
*dev_priv
= res
->dev_priv
;
685 ttm_mem_global_free(vmw_mem_glob(dev_priv
),
690 * This function is called when user space has no more references on the
691 * base object. It releases the base-object's reference on the resource object.
694 static void vmw_user_shader_base_release(struct ttm_base_object
**p_base
)
696 struct ttm_base_object
*base
= *p_base
;
697 struct vmw_resource
*res
= vmw_user_shader_base_to_res(base
);
700 vmw_resource_unreference(&res
);
703 int vmw_shader_destroy_ioctl(struct drm_device
*dev
, void *data
,
704 struct drm_file
*file_priv
)
706 struct drm_vmw_shader_arg
*arg
= (struct drm_vmw_shader_arg
*)data
;
707 struct ttm_object_file
*tfile
= vmw_fpriv(file_priv
)->tfile
;
709 return ttm_ref_object_base_unref(tfile
, arg
->handle
,
713 static int vmw_user_shader_alloc(struct vmw_private
*dev_priv
,
714 struct vmw_buffer_object
*buffer
,
717 SVGA3dShaderType shader_type
,
718 uint8_t num_input_sig
,
719 uint8_t num_output_sig
,
720 struct ttm_object_file
*tfile
,
723 struct vmw_user_shader
*ushader
;
724 struct vmw_resource
*res
, *tmp
;
725 struct ttm_operation_ctx ctx
= {
726 .interruptible
= true,
731 if (unlikely(vmw_user_shader_size
== 0))
732 vmw_user_shader_size
=
733 ttm_round_pot(sizeof(struct vmw_user_shader
)) +
734 VMW_IDA_ACC_SIZE
+ TTM_OBJ_EXTRA_SIZE
;
736 ret
= ttm_mem_global_alloc(vmw_mem_glob(dev_priv
),
737 vmw_user_shader_size
,
739 if (unlikely(ret
!= 0)) {
740 if (ret
!= -ERESTARTSYS
)
741 DRM_ERROR("Out of graphics memory for shader "
746 ushader
= kzalloc(sizeof(*ushader
), GFP_KERNEL
);
747 if (unlikely(!ushader
)) {
748 ttm_mem_global_free(vmw_mem_glob(dev_priv
),
749 vmw_user_shader_size
);
754 res
= &ushader
->shader
.res
;
755 ushader
->base
.shareable
= false;
756 ushader
->base
.tfile
= NULL
;
759 * From here on, the destructor takes over resource freeing.
762 ret
= vmw_gb_shader_init(dev_priv
, res
, shader_size
,
763 offset
, shader_type
, num_input_sig
,
764 num_output_sig
, buffer
,
765 vmw_user_shader_free
);
766 if (unlikely(ret
!= 0))
769 tmp
= vmw_resource_reference(res
);
770 ret
= ttm_base_object_init(tfile
, &ushader
->base
, false,
772 &vmw_user_shader_base_release
, NULL
);
774 if (unlikely(ret
!= 0)) {
775 vmw_resource_unreference(&tmp
);
780 *handle
= ushader
->base
.handle
;
782 vmw_resource_unreference(&res
);
788 static struct vmw_resource
*vmw_shader_alloc(struct vmw_private
*dev_priv
,
789 struct vmw_buffer_object
*buffer
,
792 SVGA3dShaderType shader_type
)
794 struct vmw_shader
*shader
;
795 struct vmw_resource
*res
;
796 struct ttm_operation_ctx ctx
= {
797 .interruptible
= true,
802 if (unlikely(vmw_shader_size
== 0))
804 ttm_round_pot(sizeof(struct vmw_shader
)) +
807 ret
= ttm_mem_global_alloc(vmw_mem_glob(dev_priv
),
810 if (unlikely(ret
!= 0)) {
811 if (ret
!= -ERESTARTSYS
)
812 DRM_ERROR("Out of graphics memory for shader "
817 shader
= kzalloc(sizeof(*shader
), GFP_KERNEL
);
818 if (unlikely(!shader
)) {
819 ttm_mem_global_free(vmw_mem_glob(dev_priv
),
828 * From here on, the destructor takes over resource freeing.
830 ret
= vmw_gb_shader_init(dev_priv
, res
, shader_size
,
831 offset
, shader_type
, 0, 0, buffer
,
835 return ret
? ERR_PTR(ret
) : res
;
839 static int vmw_shader_define(struct drm_device
*dev
, struct drm_file
*file_priv
,
840 enum drm_vmw_shader_type shader_type_drm
,
841 u32 buffer_handle
, size_t size
, size_t offset
,
842 uint8_t num_input_sig
, uint8_t num_output_sig
,
843 uint32_t *shader_handle
)
845 struct vmw_private
*dev_priv
= vmw_priv(dev
);
846 struct ttm_object_file
*tfile
= vmw_fpriv(file_priv
)->tfile
;
847 struct vmw_buffer_object
*buffer
= NULL
;
848 SVGA3dShaderType shader_type
;
851 if (buffer_handle
!= SVGA3D_INVALID_ID
) {
852 ret
= vmw_user_bo_lookup(tfile
, buffer_handle
,
854 if (unlikely(ret
!= 0)) {
855 VMW_DEBUG_USER("Couldn't find buffer for shader creation.\n");
859 if ((u64
)buffer
->base
.num_pages
* PAGE_SIZE
<
860 (u64
)size
+ (u64
)offset
) {
861 VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
867 switch (shader_type_drm
) {
868 case drm_vmw_shader_type_vs
:
869 shader_type
= SVGA3D_SHADERTYPE_VS
;
871 case drm_vmw_shader_type_ps
:
872 shader_type
= SVGA3D_SHADERTYPE_PS
;
875 VMW_DEBUG_USER("Illegal shader type.\n");
880 ret
= ttm_read_lock(&dev_priv
->reservation_sem
, true);
881 if (unlikely(ret
!= 0))
884 ret
= vmw_user_shader_alloc(dev_priv
, buffer
, size
, offset
,
885 shader_type
, num_input_sig
,
886 num_output_sig
, tfile
, shader_handle
);
888 ttm_read_unlock(&dev_priv
->reservation_sem
);
890 vmw_bo_unreference(&buffer
);
895 * vmw_shader_id_ok - Check whether a compat shader user key and
896 * shader type are within valid bounds.
898 * @user_key: User space id of the shader.
899 * @shader_type: Shader type.
901 * Returns true if valid false if not.
903 static bool vmw_shader_id_ok(u32 user_key
, SVGA3dShaderType shader_type
)
905 return user_key
<= ((1 << 20) - 1) && (unsigned) shader_type
< 16;
909 * vmw_shader_key - Compute a hash key suitable for a compat shader.
911 * @user_key: User space id of the shader.
912 * @shader_type: Shader type.
914 * Returns a hash key suitable for a command buffer managed resource
915 * manager hash table.
917 static u32
vmw_shader_key(u32 user_key
, SVGA3dShaderType shader_type
)
919 return user_key
| (shader_type
<< 20);
923 * vmw_shader_remove - Stage a compat shader for removal.
925 * @man: Pointer to the compat shader manager identifying the shader namespace.
926 * @user_key: The key that is used to identify the shader. The key is
927 * unique to the shader type.
928 * @shader_type: Shader type.
929 * @list: Caller's list of staged command buffer resource actions.
931 int vmw_shader_remove(struct vmw_cmdbuf_res_manager
*man
,
932 u32 user_key
, SVGA3dShaderType shader_type
,
933 struct list_head
*list
)
935 struct vmw_resource
*dummy
;
937 if (!vmw_shader_id_ok(user_key
, shader_type
))
940 return vmw_cmdbuf_res_remove(man
, vmw_cmdbuf_res_shader
,
941 vmw_shader_key(user_key
, shader_type
),
946 * vmw_compat_shader_add - Create a compat shader and stage it for addition
947 * as a command buffer managed resource.
949 * @man: Pointer to the compat shader manager identifying the shader namespace.
950 * @user_key: The key that is used to identify the shader. The key is
951 * unique to the shader type.
952 * @bytecode: Pointer to the bytecode of the shader.
953 * @shader_type: Shader type.
954 * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
955 * to be created with.
956 * @list: Caller's list of staged command buffer resource actions.
959 int vmw_compat_shader_add(struct vmw_private
*dev_priv
,
960 struct vmw_cmdbuf_res_manager
*man
,
961 u32 user_key
, const void *bytecode
,
962 SVGA3dShaderType shader_type
,
964 struct list_head
*list
)
966 struct ttm_operation_ctx ctx
= { false, true };
967 struct vmw_buffer_object
*buf
;
968 struct ttm_bo_kmap_obj map
;
971 struct vmw_resource
*res
;
973 if (!vmw_shader_id_ok(user_key
, shader_type
))
976 /* Allocate and pin a DMA buffer */
977 buf
= kzalloc(sizeof(*buf
), GFP_KERNEL
);
981 ret
= vmw_bo_init(dev_priv
, buf
, size
, &vmw_sys_placement
,
982 true, true, vmw_bo_bo_free
);
983 if (unlikely(ret
!= 0))
986 ret
= ttm_bo_reserve(&buf
->base
, false, true, NULL
);
987 if (unlikely(ret
!= 0))
990 /* Map and copy shader bytecode. */
991 ret
= ttm_bo_kmap(&buf
->base
, 0, PAGE_ALIGN(size
) >> PAGE_SHIFT
,
993 if (unlikely(ret
!= 0)) {
994 ttm_bo_unreserve(&buf
->base
);
998 memcpy(ttm_kmap_obj_virtual(&map
, &is_iomem
), bytecode
, size
);
1001 ttm_bo_kunmap(&map
);
1002 ret
= ttm_bo_validate(&buf
->base
, &vmw_sys_placement
, &ctx
);
1004 ttm_bo_unreserve(&buf
->base
);
1006 res
= vmw_shader_alloc(dev_priv
, buf
, size
, 0, shader_type
);
1007 if (unlikely(ret
!= 0))
1010 ret
= vmw_cmdbuf_res_add(man
, vmw_cmdbuf_res_shader
,
1011 vmw_shader_key(user_key
, shader_type
),
1013 vmw_resource_unreference(&res
);
1015 vmw_bo_unreference(&buf
);
1021 * vmw_shader_lookup - Look up a compat shader
1023 * @man: Pointer to the command buffer managed resource manager identifying
1024 * the shader namespace.
1025 * @user_key: The user space id of the shader.
1026 * @shader_type: The shader type.
1028 * Returns a refcounted pointer to a struct vmw_resource if the shader was
1029 * found. An error pointer otherwise.
1031 struct vmw_resource
*
1032 vmw_shader_lookup(struct vmw_cmdbuf_res_manager
*man
,
1034 SVGA3dShaderType shader_type
)
1036 if (!vmw_shader_id_ok(user_key
, shader_type
))
1037 return ERR_PTR(-EINVAL
);
1039 return vmw_cmdbuf_res_lookup(man
, vmw_cmdbuf_res_shader
,
1040 vmw_shader_key(user_key
, shader_type
));
1043 int vmw_shader_define_ioctl(struct drm_device
*dev
, void *data
,
1044 struct drm_file
*file_priv
)
1046 struct drm_vmw_shader_create_arg
*arg
=
1047 (struct drm_vmw_shader_create_arg
*)data
;
1049 return vmw_shader_define(dev
, file_priv
, arg
->shader_type
,
1051 arg
->size
, arg
->offset
,
1053 &arg
->shader_handle
);