/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10
struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static uint64_t vmw_user_stream_size;
static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}
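/*
 * Usage sketch (illustrative only, not part of the driver): callers pair
 * each reference with an unreference, e.g. when handing a resource off to
 * code that runs without the resource_lock held:
 *
 *	struct vmw_resource *tmp = vmw_resource_reference(res);
 *	...
 *	vmw_resource_unreference(&tmp);		// also sets tmp to NULL
 */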
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.shared = false;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		res->hw_destroy(res);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_context_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}
/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource from the resource manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
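/*
 * Note on the pattern above (descriptive, not new driver logic):
 * idr_preload(GFP_KERNEL) performs any sleeping allocation up front so
 * that idr_alloc() can run with GFP_NOWAIT while resource_lock is held.
 * The same idiom in isolation (hypothetical my_idr/my_lock names):
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 */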
/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
	return ret;
}
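/*
 * Caller sketch (illustrative only): exactly one of the two out pointers
 * is set on success, so callers dispatch on which one is non-NULL:
 *
 *	struct vmw_surface *surf = NULL;
 *	struct vmw_dma_buffer *buf = NULL;
 *
 *	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
 *	if (ret == 0 && surf)
 *		vmw_surface_unreference(&surf);
 *	else if (ret == 0)
 *		vmw_dmabuf_unreference(&buf);
 */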
/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
				  bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}
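/*
 * Worked example (descriptive, assuming PAGE_SIZE == 4096): a 1 MiB
 * buffer gives num_pages == 256, so page_array_size covers 256 page
 * pointers, plus 256 dma_addr_t entries when map_mode is
 * vmw_dma_alloc_coherent, on top of the one-time struct sizes above.
 */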
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_prime_object_kfree(vmw_user_bo, prime);
}
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_dmabuf_destroy);

	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, NULL, bo_free);
	return ret;
}
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}
static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
					    enum ttm_ref_type ref_type)
{
	struct vmw_user_dma_buffer *user_bo;

	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->dma.base);
		break;
	default:
		BUG();
	}
}
/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      (dev_priv->has_mob) ?
			      &vmw_sys_placement :
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_dmabuf_release,
				    &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	*handle = user_bo->prime.base.hash.key;

out_no_base_object:
	return ret;
}
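/*
 * Usage sketch (illustrative only): the ioctl paths below allocate a
 * buffer, publish the handle, and then drop their local reference:
 *
 *	uint32_t handle;
 *	struct vmw_dma_buffer *dma_buf;
 *
 *	ret = vmw_user_dmabuf_alloc(dev_priv, tfile, size, false,
 *				    &handle, &dma_buf);
 *	if (ret == 0)
 *		vmw_dmabuf_unreference(&dma_buf);
 */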
/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}
/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
		long lret;

		if (nonblock)
			return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;

		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}
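/*
 * Flag semantics (descriptive): drm_vmw_synccpu_allow_cs only waits for
 * pending GPU work without blocking later command submission, whereas the
 * default path takes a synccpu write grab that is dropped either by the
 * release op below or implicitly when @tfile is closed.
 */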
/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}
/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}
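/*
 * Interface note (descriptive): user space mmap()s the buffer through the
 * drm fd using rep->map_handle as the mmap offset, and names the buffer
 * in later ioctls via rep->handle / rep->cur_gmr_id.
 */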
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf,
			      uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

	*handle = user_bo->prime.base.hash.key;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL);
}
/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}
static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}
/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */

	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_unlock;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}
/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_dma_buffer *dma_buf;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    args->size, false, &args->handle,
				    &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
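/*
 * Worked example of the pitch/size computation above (descriptive): a
 * 1024x768 buffer at bpp == 32 gives pitch == 1024 * 4 == 4096 bytes and
 * size == 4096 * 768 == 3145728 bytes; the (bpp + 7) / 8 rounding also
 * handles depths that are not a multiple of 8.
 */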
/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}
/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}
/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}
/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}
/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (new_backup && new_backup != res->backup) {

		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		res->backup = vmw_dmabuf_reference(new_backup);
		lockdep_assert_held(&new_backup->base.resv->lock.base);
		list_add_tail(&res->mob_head, &new_backup->res_list);
	}
	if (new_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}
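/*
 * Lifecycle sketch (illustrative only): command submission brackets
 * resource use with reserve/validate/unreserve:
 *
 *	ret = vmw_resource_reserve(res, false);
 *	ret = vmw_resource_validate(res);
 *	...	// submit commands referencing res
 *	vmw_resource_unreserve(res, NULL, 0);	// back on the LRU list
 */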
/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	val_buf->shared = false;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}
/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, true);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(NULL, &val_list);
	ttm_bo_unref(&val_buf->bo);
}
/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.shared = false;
	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}
/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (likely(!res->func->may_evict))
		return 0;

	val_buf.bo = NULL;
	val_buf.shared = false;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}
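/*
 * Note on the retry loop above (descriptive): a -EBUSY result evicts the
 * least-recently-used resource of the same type and retries; -ERESTARTSYS
 * or more than VMW_RES_EVICT_ERR_COUNT consecutive eviction failures give
 * up and propagate the error.
 */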
/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		reservation_object_add_excl_fence(bo->resv, &fence->base);
		fence_put(&fence->base);
	} else
		reservation_object_add_excl_fence(bo->resv, &fence->base);
}
/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo:             The TTM buffer object about to move.
 * @mem:            The struct ttm_mem_reg indicating to what memory
 *                  region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo::res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dma_buf;

	if (mem == NULL)
		return;

	if (bo->destroy != vmw_dmabuf_bo_free &&
	    bo->destroy != vmw_user_dmabuf_destroy)
		return;

	dma_buf = container_of(bo, struct vmw_dma_buffer, base);

	if (mem->mem_type != VMW_PL_MOB) {
		struct vmw_resource *res, *n;
		struct ttm_validate_buffer val_buf;

		val_buf.bo = bo;
		val_buf.shared = false;

		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

			if (unlikely(res->func->unbind == NULL))
				continue;

			(void) res->func->unbind(res, true, &val_buf);
			res->backup_dirty = true;
			res->res_dirty = false;
			list_del_init(&res->mob_head);
		}

		(void) ttm_bo_wait(bo, false, false, false);
	}
}
/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}
/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}
/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}