/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10
struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static uint64_t vmw_user_stream_size;
static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};
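
/*
 * Illustrative sketch (not from the original driver): a resource type that
 * does need device backing would fill in the remaining vmw_res_func
 * callbacks referenced elsewhere in this file. The helper names below are
 * hypothetical placeholders:
 *
 *	static const struct vmw_res_func vmw_example_func = {
 *		.res_type = vmw_res_surface,	// for example
 *		.needs_backup = true,
 *		.may_evict = true,
 *		.type_name = "example resources",
 *		.backup_placement = &vmw_vram_sys_placement,
 *		.create = vmw_example_create,
 *		.destroy = vmw_example_destroy,
 *		.bind = vmw_example_bind,
 *		.unbind = vmw_example_unbind,
 *	};
 *
 * Video streams need no backing store or rebinding, so most of these
 * callbacks stay NULL in vmw_stream_func above.
 */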
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, 0);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}
/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource from the resource manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @obj_type: Resource object type.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	res->backup_size = 0;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
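
/*
 * Illustrative sketch (not from the original driver): a typical ioctl-side
 * caller of vmw_user_resource_lookup_handle() looks the resource up by its
 * user-space handle, uses it, and then drops the reference it was handed.
 * The surrounding variables are hypothetical:
 *
 *	struct vmw_resource *res;
 *	int ret;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	// ... operate on the refcounted resource ...
 *
 *	vmw_resource_unreference(&res);
 */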
/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed at by out_surf and out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
	return ret;
}
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, bo_free);
	return ret;
}
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_base_object_kfree(vmw_user_bo, base);
}
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}
/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_base_object_init(tfile,
				   &user_bo->base,
				   shareable,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	*handle = user_bo->base.hash.key;

out_no_base_object:
	return ret;
}
/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);
	return (vmw_user_bo->base.tfile == tfile ||
		vmw_user_bo->base.shareable) ? 0 : -EPERM;
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
	return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
}
static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}
static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}
/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */

	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_unlock;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (vmw_user_bo == NULL)
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (ret != 0) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (ret != 0)
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;

	args->handle = vmw_user_bo->base.hash.key;

out_no_base_object:
	ttm_bo_unref(&tmp);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}
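
/*
 * Illustrative sketch (not from the original driver): from user space the
 * three dumb-buffer helpers above are reached through the generic DRM
 * dumb-buffer ioctls, roughly as follows (libdrm, example values):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 640, .height = 480, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *ptr;
 *
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, map.offset);
 *
 * vmw_dumb_map_offset() is what supplies map.offset here, as a fake mmap
 * offset derived from the buffer object's vma node.
 */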
/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}
/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}
/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @new_backup: Pointer to new backup buffer if command submission
 * switched backup buffers.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (new_backup && new_backup != res->backup) {

		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		res->backup = vmw_dmabuf_reference(new_backup);
		lockdep_assert_held(&new_backup->base.resv->lock.base);
		list_add_tail(&res->mob_head, &new_backup->res_list);
	}
	if (new_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}
/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  struct ww_acquire_ctx *ticket,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}
/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, true);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
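
/*
 * Illustrative sketch (not from the original driver): during command
 * submission a resource is taken off the LRU list, validated against the
 * device and finally put back, possibly with a new backup buffer. The
 * resource-side skeleton looks roughly like this (error labels are
 * hypothetical):
 *
 *	ret = vmw_resource_reserve(res, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	ret = vmw_resource_validate(res);
 *	if (unlikely(ret != 0))
 *		goto out_unreserve;
 *
 *	// ... emit device commands referencing res->id ...
 *
 * out_unreserve:
 *	vmw_resource_unreserve(res, NULL, 0);
 *
 * The real submission path also reserves, validates and fences the backup
 * buffers themselves; see vmw_fence_single_bo() further down.
 */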
/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket: The ww acquire context used for the reservation.
 * @val_buf: Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_unref(&val_buf->bo);
}
/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res: The resource to evict.
 * @interruptible: Whether to wait interruptible.
 */
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	struct ww_acquire_ctx ticket;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	ret = vmw_resource_check_buffer(res, &ticket, interruptible,
					&val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&ticket, &val_buf);

	return ret;
}
/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res: The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (likely(!res->func->may_evict))
		return 0;

	val_buf.bo = NULL;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}
/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct vmw_fence_obj *old_fence_obj;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		driver->sync_obj_ref(fence);

	spin_lock(&bdev->fence_lock);

	old_fence_obj = bo->sync_obj;
	bo->sync_obj = fence;

	spin_unlock(&bdev->fence_lock);

	if (old_fence_obj)
		vmw_fence_obj_unreference(&old_fence_obj);
}
/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * For now does nothing.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
}
/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}
/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}
/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}