drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
	uint32_t size;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

static uint64_t vmw_user_context_size;
static uint64_t vmw_user_surface_size;
static uint64_t vmw_user_stream_size;
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
static void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(res->idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id = res->id;
	struct idr *idr = res->idr;

	res->avail = false;
	if (res->remove_from_lists != NULL)
		res->remove_from_lists(res);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}
/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
				 struct vmw_resource *res)
{
	int ret;

	BUG_ON(res->id != -1);

	do {
		if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(res->idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}
static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     bool delay_id,
			     void (*res_free) (struct vmw_resource *res),
			     void (*remove_from_lists)
			     (struct vmw_resource *res))
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->remove_from_lists = remove_from_lists;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;
	INIT_LIST_HEAD(&res->query_head);
	INIT_LIST_HEAD(&res->validate_head);
	res->id = -1;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(dev_priv, res);
}
/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
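
/*
 * Illustrative sketch, not part of the driver: the lookup/unreference
 * pattern the ioctl handlers below rely on. vmw_resource_lookup() returns
 * a kref'd resource (or NULL), and every successful lookup must be paired
 * with a vmw_resource_unreference(). The function name and the idr used
 * here (context_idr) are only for illustration.
 */
static inline int vmw_resource_lookup_example(struct vmw_private *dev_priv,
					      int id)
{
	struct vmw_resource *res;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, id);
	if (unlikely(res == NULL))
		return -EINVAL;

	/* ... use the resource while holding the reference ... */

	vmw_resource_unreference(&res);
	return 0;
}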
/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv, false);
}

static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, false, res_free, NULL);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}
/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	kfree(ctx);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id,
		      struct vmw_resource **p_res)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
			container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
		if (p_res)
			*p_res = vmw_resource_reference(res);
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}
struct vmw_bpp {
	uint8_t bpp;
	uint8_t s_bpp;
};

/*
 * Size table for the supported SVGA3D surface formats. Each entry holds
 * two values: bpp and s_bpp, where s_bpp is short for "stride bits per
 * pixel". The values are chosen so that the minimum stride for an image,
 * in bits, is
 *
 * min_stride = w * s_bpp
 *
 * (rounded up to whole bytes where a byte pitch is needed), and the total
 * memory requirement for the image is
 *
 * h * min_stride * bpp / s_bpp
 *
 * See the illustrative helpers after the table.
 */
static const struct vmw_bpp vmw_sf_bpp[] = {
	[SVGA3D_FORMAT_INVALID] = {0, 0},
	[SVGA3D_X8R8G8B8] = {32, 32},
	[SVGA3D_A8R8G8B8] = {32, 32},
	[SVGA3D_R5G6B5] = {16, 16},
	[SVGA3D_X1R5G5B5] = {16, 16},
	[SVGA3D_A1R5G5B5] = {16, 16},
	[SVGA3D_A4R4G4B4] = {16, 16},
	[SVGA3D_Z_D32] = {32, 32},
	[SVGA3D_Z_D16] = {16, 16},
	[SVGA3D_Z_D24S8] = {32, 32},
	[SVGA3D_Z_D15S1] = {16, 16},
	[SVGA3D_LUMINANCE8] = {8, 8},
	[SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
	[SVGA3D_LUMINANCE16] = {16, 16},
	[SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
	[SVGA3D_DXT1] = {4, 16},
	[SVGA3D_DXT2] = {8, 32},
	[SVGA3D_DXT3] = {8, 32},
	[SVGA3D_DXT4] = {8, 32},
	[SVGA3D_DXT5] = {8, 32},
	[SVGA3D_BUMPU8V8] = {16, 16},
	[SVGA3D_BUMPL6V5U5] = {16, 16},
	[SVGA3D_BUMPX8L8V8U8] = {32, 32},
	[SVGA3D_ARGB_S10E5] = {16, 16},
	[SVGA3D_ARGB_S23E8] = {32, 32},
	[SVGA3D_A2R10G10B10] = {32, 32},
	[SVGA3D_V8U8] = {16, 16},
	[SVGA3D_Q8W8V8U8] = {32, 32},
	[SVGA3D_CxV8U8] = {16, 16},
	[SVGA3D_X8L8V8U8] = {32, 32},
	[SVGA3D_A2W10V10U10] = {32, 32},
	[SVGA3D_ALPHA8] = {8, 8},
	[SVGA3D_R_S10E5] = {16, 16},
	[SVGA3D_R_S23E8] = {32, 32},
	[SVGA3D_RG_S10E5] = {16, 16},
	[SVGA3D_RG_S23E8] = {32, 32},
	[SVGA3D_BUFFER] = {8, 8},
	[SVGA3D_Z_D24X8] = {32, 32},
	[SVGA3D_V16U16] = {32, 32},
	[SVGA3D_G16R16] = {32, 32},
	[SVGA3D_A16B16G16R16] = {64, 64},
	[SVGA3D_UYVY] = {12, 12},
	[SVGA3D_YUY2] = {12, 12},
	[SVGA3D_NV12] = {12, 8},
	[SVGA3D_AYUV] = {32, 32},
	[SVGA3D_BC4_UNORM] = {4, 16},
	[SVGA3D_BC5_UNORM] = {8, 32},
	[SVGA3D_Z_DF16] = {16, 16},
	[SVGA3D_Z_DF24] = {24, 24},
	[SVGA3D_Z_D24S8_INT] = {32, 32}
};
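
/*
 * Illustrative helpers, not part of the driver: how the table above is
 * meant to be used. They mirror the pitch and size computations done in
 * vmw_surface_dma_encode() and vmw_surface_define_ioctl() below. The
 * helper names are invented for illustration only.
 */
static inline uint32_t vmw_sf_min_stride(uint32_t format, uint32_t width)
{
	/* Round the bit stride up to a whole number of bytes. */
	return (width * vmw_sf_bpp[format].s_bpp + 7) >> 3;
}

static inline uint32_t vmw_sf_image_size(uint32_t format,
					 const struct drm_vmw_size *size)
{
	/* h * min_stride * bpp / s_bpp, as described above. */
	return vmw_sf_min_stride(format, size->width) * size->height *
		size->depth * vmw_sf_bpp[format].bpp /
		vmw_sf_bpp[format].s_bpp;
}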
/**
 * Surface management.
 */

struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};


/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}


/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}
/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
	uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;

	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset = body->guest.pitch*cur_size->height*
			cur_size->depth*bpp / stride_bpp;
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	void *cmd;

	if (res->id != -1) {

		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
		if (unlikely(cmd == NULL)) {
			DRM_ERROR("Failed reserving FIFO space for surface "
				  "destruction.\n");
			return;
		}

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * TODO: use an atomic used_memory_size, or a separate lock,
		 * to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		srf = container_of(res, struct vmw_surface, res);
		dev_priv->used_memory_size -= srf->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);

	}
	vmw_3d_resource_dec(dev_priv, false);
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	if (srf->backup)
		ttm_bo_unref(&srf->backup);
	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}
/**
 * vmw_surface_do_validate - make a surface available to the device.
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, allocate one, and optionally
 * DMA the backed up surface contents to the device.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
int vmw_surface_do_validate(struct vmw_private *dev_priv,
			    struct vmw_surface *srf)
{
	struct vmw_resource *res = &srf->res;
	struct list_head val_list;
	struct ttm_validate_buffer val_buf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Reserve- and validate the backup DMA bo.
	 */

	if (srf->backup) {
		INIT_LIST_HEAD(&val_list);
		val_buf.bo = ttm_bo_reference(srf->backup);
		val_buf.new_sync_obj_arg = (void *)((unsigned long)
						    DRM_VMW_FENCE_FLAG_EXEC);
		list_add_tail(&val_buf.head, &val_list);
		ret = ttm_eu_reserve_buffers(&val_list);
		if (unlikely(ret != 0))
			goto out_no_reserve;

		ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
				      true, false, false);
		if (unlikely(ret != 0))
			goto out_no_validate;
	}

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(dev_priv, res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}
	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define- and dma commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	if (srf->backup)
		submit_size += vmw_surface_dma_size(srf);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "validation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	if (srf->backup) {
		SVGAGuestPtr ptr;

		cmd += vmw_surface_define_size(srf);
		vmw_bo_get_guest_ptr(srf->backup, &ptr);
		vmw_surface_dma_encode(srf, cmd, &ptr, true);
	}

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	if (srf->backup) {
		struct vmw_fence_obj *fence;

		(void) vmw_execbuf_fence_commands(NULL, dev_priv,
						  &fence, NULL);
		ttm_eu_fence_buffer_objects(&val_list, fence);
		if (likely(fence != NULL))
			vmw_fence_obj_unreference(&fence);
		ttm_bo_unref(&val_buf.bo);
		ttm_bo_unref(&srf->backup);
	}

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += srf->backup_size;

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
out_no_validate:
	if (srf->backup)
		ttm_eu_backoff_reservation(&val_list);
out_no_reserve:
	if (srf->backup)
		ttm_bo_unref(&val_buf.bo);
	return ret;
}
/**
 * vmw_surface_evict - Evict a hw surface.
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to a struct vmw_surface
 *
 * DMA the contents of a hw surface to a backup guest buffer object,
 * and destroy the hw surface, releasing its id.
 */
int vmw_surface_evict(struct vmw_private *dev_priv,
		      struct vmw_surface *srf)
{
	struct vmw_resource *res = &srf->res;
	struct list_head val_list;
	struct ttm_validate_buffer val_buf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;
	struct vmw_fence_obj *fence;
	SVGAGuestPtr ptr;

	BUG_ON(res->id == -1);

	/*
	 * Create a surface backup buffer object.
	 */

	if (!srf->backup) {
		ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
				    ttm_bo_type_device,
				    &vmw_srf_placement, 0, 0, true,
				    NULL, &srf->backup);
		if (unlikely(ret != 0))
			return ret;
	}

	/*
	 * Reserve- and validate the backup DMA bo.
	 */

	INIT_LIST_HEAD(&val_list);
	val_buf.bo = ttm_bo_reference(srf->backup);
	val_buf.new_sync_obj_arg = (void *)(unsigned long)
		DRM_VMW_FENCE_FLAG_EXEC;
	list_add_tail(&val_buf.head, &val_list);
	ret = ttm_eu_reserve_buffers(&val_list);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
			      true, false, false);
	if (unlikely(ret != 0))
		goto out_no_validate;

	/*
	 * Encode the dma- and surface destroy commands.
	 */

	submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "eviction.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_bo_get_guest_ptr(srf->backup, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, false);
	cmd += vmw_surface_dma_size(srf);
	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= srf->backup_size;

	/*
	 * Create a fence object and fence the DMA buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);
	ttm_eu_fence_buffer_objects(&val_list, fence);
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);
	ttm_bo_unref(&val_buf.bo);

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);

	return 0;

out_no_fifo:
out_no_validate:
	if (srf->backup)
		ttm_eu_backoff_reservation(&val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf.bo);
	ttm_bo_unref(&srf->backup);
	return ret;
}
/**
 * vmw_surface_validate - make a surface available to the device, evicting
 * other surfaces if needed.
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to a struct vmw_surface.
 *
 * Try to validate a surface and if it fails due to limited device resources,
 * repeatedly try to evict other surfaces until the request can be
 * accommodated.
 *
 * May return errors if out of resources.
 */
int vmw_surface_validate(struct vmw_private *dev_priv,
			 struct vmw_surface *srf)
{
	int ret;
	struct vmw_surface *evict_srf;

	do {
		write_lock(&dev_priv->resource_lock);
		list_del_init(&srf->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_surface_do_validate(dev_priv, srf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(&dev_priv->surface_lru)) {
			DRM_ERROR("Out of device memory for surfaces.\n");
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_srf = vmw_surface_reference
			(list_first_entry(&dev_priv->surface_lru,
					  struct vmw_surface,
					  lru_head));
		list_del_init(&evict_srf->lru_head);

		write_unlock(&dev_priv->resource_lock);
		(void) vmw_surface_evict(dev_priv, evict_srf);

		vmw_surface_unreference(&evict_srf);

	} while (1);

	if (unlikely(ret != 0 && srf->res.id != -1)) {
		write_lock(&dev_priv->resource_lock);
		list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
		write_unlock(&dev_priv->resource_lock);
	}

	return ret;
}
/**
 * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
 *
 * As part of the resource destruction, remove the surface from any
 * lookup lists.
 */
static void vmw_surface_remove_from_lists(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	list_del_init(&srf->lru_head);
}

int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(res_free == NULL);
	INIT_LIST_HEAD(&srf->lru_head);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, true, res_free,
				vmw_surface_remove_from_lists);

	if (unlikely(ret != 0))
		res_free(res);

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return ret;
}
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	if (srf->backup)
		ttm_bo_unref(&srf->backup);
	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
 * vmw_resource_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: list of resources to unreserve.
 *
 * Currently only surfaces are considered: the function traverses the
 * resource list and puts each surface back on the device's surface LRU
 * list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct list_head *list)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	rwlock_t *lock = NULL;

	list_for_each_entry(res, list, validate_head) {

		if (res->res_free != &vmw_surface_res_free &&
		    res->res_free != &vmw_user_surface_free)
			continue;

		if (unlikely(lock == NULL)) {
			lock = &res->dev_priv->resource_lock;
			write_lock(lock);
		}

		srf = container_of(res, struct vmw_surface, res);
		list_del_init(&srf->lru_head);
		list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
	}

	if (lock != NULL)
		write_unlock(lock);
}
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t stride_bpp;
	uint32_t bpp;
	uint32_t num_sizes;
	uint32_t size;
	struct vmw_master *vmaster = vmw_master(file_priv->master);

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		num_sizes += req->mip_levels[i];

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS)
		return -EINVAL;

	size = vmw_user_surface_size + 128 +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));


	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;
	srf->backup = NULL;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_no_sizes;
	}
	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
			       GFP_KERNEL);
	if (unlikely(srf->offsets == NULL)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_no_copy;
	}

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

	bpp = vmw_sf_bpp[srf->format].bpp;
	stride_bpp = vmw_sf_bpp[srf->format].s_bpp;

	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride =
				(cur_size->width * stride_bpp + 7) >> 3;

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += stride * cur_size->height *
				cur_size->depth * bpp / stride_bpp;
			++cur_offset;
			++cur_size;
		}
	}
	srf->backup_size = cur_bo_offset;

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		/* allocate image area and clear it */
		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (!srf->snooper.image) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);

	ttm_read_unlock(&vmaster->lock);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_no_sizes:
	kfree(user_srf);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}
int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;

	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}
/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* we must free the bo here as
		 * ttm_buffer_object_init does so as well */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->validate_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}
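
/*
 * Hypothetical allocation sketch, not part of the driver: how
 * vmw_dmabuf_init() is meant to be paired with a destroy callback
 * (here the kernel-internal vmw_dmabuf_bo_free). The function name is
 * invented; the real users are the ioctl handlers below.
 */
static inline int vmw_dmabuf_alloc_example(struct vmw_private *dev_priv,
					   size_t size,
					   struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	vmw_bo = kzalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (unlikely(vmw_bo == NULL))
		return -ENOMEM;

	/*
	 * On error, vmw_dmabuf_init() already disposes of the object
	 * through the destroy callback, so it must not be freed again here.
	 */
	ret = vmw_dmabuf_init(dev_priv, vmw_bo, size,
			      &vmw_vram_sys_placement, true,
			      &vmw_dmabuf_bo_free);
	if (likely(ret == 0))
		*out = vmw_bo;
	return ret;
}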
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;
	else {
		rep->handle = vmw_user_bo->base.hash.key;
		rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
		rep->cur_gmr_id = vmw_user_bo->base.hash.key;
		rep->cur_gmr_offset = 0;
	}

out_no_base_object:
	ttm_bo_unref(&tmp);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
/**
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, false, res_free, NULL);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}
/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	kfree(stream);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */

	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_unlock;
	}


	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}