/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

struct vmw_shader {
	struct vmw_resource res;
	SVGA3dShaderType type;
	uint32_t size;
};

struct vmw_user_shader {
	struct ttm_base_object base;
	struct vmw_shader shader;
};

static uint64_t vmw_user_shader_size;
static uint64_t vmw_shader_size;

static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);

static int vmw_gb_shader_create(struct vmw_resource *res);
static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_shader_conv = {
	.object_type = VMW_RES_SHADER,
	.base_obj_to_res = vmw_user_shader_base_to_res,
	.res_free = vmw_user_shader_free
};

const struct vmw_user_resource_conv *user_shader_converter =
	&user_shader_conv;

static const struct vmw_res_func vmw_gb_shader_func = {
	.res_type = vmw_res_shader,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed shaders",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_shader_create,
	.destroy = vmw_gb_shader_destroy,
	.bind = vmw_gb_shader_bind,
	.unbind = vmw_gb_shader_unbind
};

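/*
 * The vmw_res_func table above hooks guest-backed shaders into the generic
 * vmwgfx resource machinery: create/destroy manage the device-visible
 * shader id, while bind/unbind attach and detach the MOB that backs the
 * shader bytecode. Together with needs_backup/may_evict this is what lets
 * the backup buffer be evicted under memory pressure and rebound later.
 */
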
/**
 * Shader management:
 */

static inline struct vmw_shader *
vmw_res_to_shader(struct vmw_resource *res)
{
	return container_of(res, struct vmw_shader, res);
}

static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
	(void) vmw_gb_shader_destroy(res);
}

static int vmw_gb_shader_init(struct vmw_private *dev_priv,
			      struct vmw_resource *res,
			      uint32_t size,
			      uint64_t offset,
			      SVGA3dShaderType type,
			      struct vmw_dma_buffer *byte_code,
			      void (*res_free) (struct vmw_resource *res))
{
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_shader_func);

	if (unlikely(ret != 0)) {
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	res->backup_size = size;
	if (byte_code) {
		res->backup = vmw_dmabuf_reference(byte_code);
		res->backup_offset = offset;
	}
	shader->size = size;
	shader->type = type;

	vmw_resource_activate(res, vmw_hw_shader_destroy);
	return 0;
}

static int vmw_gb_shader_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBShader body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a shader id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.type = shader->type;
	cmd->body.sizeInBytes = shader->size;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.offsetInBytes = res->backup_offset;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct vmw_fence_obj *fence;

	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.mobid = SVGA3D_INVALID_ID;
	cmd->body.offsetInBytes = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

static int vmw_gb_shader_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBShader body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_res_list_scrub(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "destruction.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/**
 * User-space shader management:
 */

static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_shader, base)->
		 shader.res);
}

static void vmw_user_shader_free(struct vmw_resource *res)
{
	struct vmw_user_shader *ushader =
		container_of(res, struct vmw_user_shader, shader.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ushader, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_shader_size);
}

static void vmw_shader_free(struct vmw_resource *res)
{
	struct vmw_shader *shader = vmw_res_to_shader(res);
	struct vmw_private *dev_priv = res->dev_priv;

	kfree(shader);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_shader_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_resource *res = vmw_user_shader_base_to_res(base);

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->handle,
					 TTM_REF_USAGE);
}

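/*
 * Illustrative only, not part of this file: from user space, a shader
 * handle obtained through vmw_shader_define_ioctl() would typically be
 * released via the matching unref ioctl. A minimal sketch, assuming libdrm
 * and the DRM_VMW_UNREF_SHADER request from the vmwgfx UAPI headers
 * (fd and shader_handle are placeholders):
 *
 *	struct drm_vmw_shader_arg arg = { .handle = shader_handle };
 *
 *	drmCommandWrite(fd, DRM_VMW_UNREF_SHADER, &arg, sizeof(arg));
 */
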
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
				 struct vmw_dma_buffer *buffer,
				 size_t shader_size,
				 size_t offset,
				 SVGA3dShaderType shader_type,
				 struct ttm_object_file *tfile,
				 u32 *handle)
{
	struct vmw_user_shader *ushader;
	struct vmw_resource *res, *tmp;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of shaders anyway.
	 */
	if (unlikely(vmw_user_shader_size == 0))
		vmw_user_shader_size =
			ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_shader_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for shader "
				  "creation.\n");
		goto out;
	}

	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
	if (unlikely(ushader == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_shader_size);
		ret = -ENOMEM;
		goto out;
	}

	res = &ushader->shader.res;
	ushader->base.shareable = false;
	ushader->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
				 offset, shader_type, buffer,
				 vmw_user_shader_free);
	if (unlikely(ret != 0))
		goto out;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &ushader->base, false,
				   VMW_RES_SHADER,
				   &vmw_user_shader_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	if (handle)
		*handle = ushader->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out:
	return ret;
}

struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
				      struct vmw_dma_buffer *buffer,
				      size_t shader_size,
				      size_t offset,
				      SVGA3dShaderType shader_type)
{
	struct vmw_shader *shader;
	struct vmw_resource *res;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of shaders anyway.
	 */
	if (unlikely(vmw_shader_size == 0))
		vmw_shader_size =
			ttm_round_pot(sizeof(struct vmw_shader)) + 128;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_shader_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for shader "
				  "creation.\n");
		goto out_err;
	}

	shader = kzalloc(sizeof(*shader), GFP_KERNEL);
	if (unlikely(shader == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_shader_size);
		ret = -ENOMEM;
		goto out_err;
	}

	res = &shader->res;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */
	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
				 offset, shader_type, buffer,
				 vmw_shader_free);

out_err:
	return ret ? ERR_PTR(ret) : res;
}

int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_shader_create_arg *arg =
		(struct drm_vmw_shader_create_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *buffer = NULL;
	SVGA3dShaderType shader_type;
	int ret;

	if (arg->buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
					     &buffer);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not find buffer for shader "
				  "creation.\n");
			return ret;
		}

		if ((u64)buffer->base.num_pages * PAGE_SIZE <
		    (u64)arg->size + (u64)arg->offset) {
			DRM_ERROR("Illegal buffer- or shader size.\n");
			ret = -EINVAL;
			goto out_bad_arg;
		}
	}

	switch (arg->shader_type) {
	case drm_vmw_shader_type_vs:
		shader_type = SVGA3D_SHADERTYPE_VS;
		break;
	case drm_vmw_shader_type_ps:
		shader_type = SVGA3D_SHADERTYPE_PS;
		break;
	case drm_vmw_shader_type_gs:
		shader_type = SVGA3D_SHADERTYPE_GS;
		break;
	default:
		DRM_ERROR("Illegal shader type.\n");
		ret = -EINVAL;
		goto out_bad_arg;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_bad_arg;

	ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
				    shader_type, tfile, &arg->shader_handle);

	ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
	vmw_dmabuf_unreference(&buffer);
	return ret;
}

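/*
 * Illustrative only, not part of this file: a user-space sketch of reaching
 * vmw_shader_define_ioctl(), assuming libdrm and the DRM_VMW_CREATE_SHADER
 * request from the vmwgfx UAPI headers, with the shader bytecode already
 * copied into a DMA buffer identified by buffer_handle (fd, buffer_handle
 * and bytecode_size are placeholders):
 *
 *	struct drm_vmw_shader_create_arg arg = {
 *		.shader_type   = drm_vmw_shader_type_vs,
 *		.size          = bytecode_size,
 *		.buffer_handle = buffer_handle,
 *		.offset        = 0,
 *	};
 *	int ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_SHADER,
 *				      &arg, sizeof(arg));
 *
 * On success, arg.shader_handle holds the handle later passed to the
 * unref ioctl shown above.
 */
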
/**
 * vmw_compat_shader_id_ok - Check whether a compat shader user key and
 * shader type are within valid bounds.
 *
 * @user_key: User space id of the shader.
 * @shader_type: Shader type.
 *
 * Returns true if valid, false if not.
 */
static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
	return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
}

/**
 * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
 *
 * @user_key: User space id of the shader.
 * @shader_type: Shader type.
 *
 * Returns a hash key suitable for a command buffer managed resource
 * manager hash table.
 */
static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
	return user_key | (shader_type << 20);
}

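/*
 * For a user key K (K < 2^20) and a shader type with numeric value T
 * (T < 16), both enforced by vmw_compat_shader_id_ok(), the key is
 * K | (T << 20): the low 20 bits carry the user key and the bits above
 * them the shader type, so shaders of different types can never collide
 * in the command buffer resource manager's single hash table.
 */
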
/**
 * vmw_compat_shader_remove - Stage a compat shader for removal.
 *
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @shader_type: Shader type.
 * @list: Caller's list of staged command buffer resource actions.
 */
int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list)
{
	if (!vmw_compat_shader_id_ok(user_key, shader_type))
		return -EINVAL;

	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
				     vmw_compat_shader_key(user_key,
							   shader_type),
				     list);
}

/**
 * vmw_compat_shader_add - Create a compat shader and stage it for addition
 * as a command buffer managed resource.
 *
 * @dev_priv: Pointer to a device private structure.
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @bytecode: Pointer to the bytecode of the shader.
 * @shader_type: Shader type.
 * @size: Size of the shader bytecode in bytes.
 * @list: Caller's list of staged command buffer resource actions.
 *
 */
int vmw_compat_shader_add(struct vmw_private *dev_priv,
			  struct vmw_cmdbuf_res_manager *man,
			  u32 user_key, const void *bytecode,
			  SVGA3dShaderType shader_type,
			  size_t size,
			  struct list_head *list)
{
	struct vmw_dma_buffer *buf;
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	int ret;
	struct vmw_resource *res;

	if (!vmw_compat_shader_id_ok(user_key, shader_type))
		return -EINVAL;

	/* Allocate and pin a DMA buffer */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (unlikely(buf == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
			      true, vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out;

	ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
	if (unlikely(ret != 0))
		goto no_reserve;

	/* Map and copy shader bytecode. */
	ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
			  &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(&buf->base);
		goto no_reserve;
	}

	memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
	WARN_ON(is_iomem);

	ttm_bo_kunmap(&map);
	ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
	WARN_ON(ret != 0);
	ttm_bo_unreserve(&buf->base);

	res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto no_reserve;
	}

	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
				 vmw_compat_shader_key(user_key, shader_type),
				 res, list);
	vmw_resource_unreference(&res);
no_reserve:
	vmw_dmabuf_unreference(&buf);
out:
	return ret;
}

/**
 * vmw_compat_shader_lookup - Look up a compat shader
 *
 * @man: Pointer to the command buffer managed resource manager identifying
 * the shader namespace.
 * @user_key: The user space id of the shader.
 * @shader_type: The shader type.
 *
 * Returns a refcounted pointer to a struct vmw_resource if the shader was
 * found. An error pointer otherwise.
 */
struct vmw_resource *
vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
			 u32 user_key,
			 SVGA3dShaderType shader_type)
{
	if (!vmw_compat_shader_id_ok(user_key, shader_type))
		return ERR_PTR(-EINVAL);

	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
				     vmw_compat_shader_key(user_key,
							   shader_type));
}