// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"
#define VMW_RES_EVICT_ERR_COUNT 10
/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;
	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;

	dma_resv_assert_held(res->backup->base.base.resv);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;

	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->backup_offset < this->backup_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &backup->res_tree);

	vmw_bo_prio_add(backup, res->used_prio);
}
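/*
 * Usage note (a sketch, not driver code): because the tree above is keyed
 * on @backup_offset, a caller holding the backing bo's reservation can
 * descend it to locate a resource around a given offset in O(log n):
 *
 *	struct rb_node *cur = backup->res_tree.rb_node;
 *
 *	while (cur) {
 *		struct vmw_resource *this =
 *			container_of(cur, struct vmw_resource, mob_node);
 *
 *		cur = (offset < this->backup_offset) ?
 *			cur->rb_left : cur->rb_right;
 *	}
 *
 * vmw_resources_clean() below performs a range variant of this walk.
 */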
/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	dma_resv_assert_held(backup->base.base.resv);
	if (vmw_resource_mob_attached(res)) {
		rb_erase(&res->mob_node, &backup->res_tree);
		RB_CLEAR_NODE(&res->mob_node);
		vmw_bo_prio_del(backup, res->used_prio);
	}
}
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		vmw_resource_mob_detach(res);
		if (res->dirty)
			res->func->dirty_free(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->backup);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}
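/*
 * Typical refcounting usage (a sketch, assuming a caller that already
 * holds a valid pointer obtained from one of the lookup helpers below):
 *
 *	struct vmw_resource *tmp = vmw_resource_reference(res);
 *
 *	// ... tmp cannot be destroyed while this reference is held ...
 *
 *	vmw_resource_unreference(&tmp);	// also sets tmp to NULL
 */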
/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and
 * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
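/*
 * The idr_preload()/idr_alloc(GFP_NOWAIT)/idr_preload_end() triple above
 * is the standard pattern for id allocation under a spinlock: the
 * sleeping GFP_KERNEL allocation is done up front by idr_preload(), so
 * idr_alloc() only consumes preallocated memory while the lock is held:
 *
 *	idr_preload(GFP_KERNEL);			// may sleep
 *	spin_lock(&lock);
 *	id = idr_alloc(&idr, ptr, 1, 0, GFP_NOWAIT);	// atomic
 *	spin_unlock(&lock);
 *	idr_preload_end();
 */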
/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	RB_CLEAR_NODE(&res->mob_node);
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	res->coherent = false;
	res->used_prio = 3;
	res->dirty = NULL;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
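/*
 * A hypothetical resource-type constructor (sketch only; the vmw_foo_*
 * names are made up for illustration) would normally pass delay_id = true
 * so the device id is not allocated until the first validation:
 *
 *	static int vmw_foo_init(struct vmw_private *dev_priv,
 *				struct vmw_foo *foo)
 *	{
 *		return vmw_resource_init(dev_priv, &foo->res, true,
 *					 vmw_foo_free, &vmw_foo_func);
 *	}
 */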
/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:       Pointer to a device private struct
 * @tfile:          Pointer to a struct ttm_object_file identifying the caller
 * @handle:         The TTM user-space handle
 * @converter:      Pointer to an object describing the resource type
 * @p_res:          On successful return the location pointed to will contain
 *                  a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:       Pointer to a device private struct
 * @tfile:          Pointer to a struct ttm_object_file identifying the caller
 * @handle:         The TTM user-space handle
 * @converter:      Pointer to an object describing the resource type
 *
 * Unlike vmw_user_resource_lookup_handle(), this function returns the
 * resource without taking a reference; the caller must pair the lookup
 * with ttm_base_object_noref_release(). If the handle can't be found,
 * ERR_PTR(-ESRCH) is returned; if it is associated with an incorrect
 * resource type, ERR_PTR(-EINVAL).
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv
				      *converter)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return ERR_PTR(-ESRCH);

	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
		ttm_base_object_noref_release();
		return ERR_PTR(-EINVAL);
	}

	return converter->base_obj_to_res(base);
}
/*
 * Helper function that looks up either a surface or a buffer object.
 *
 * The pointers pointed to by @out_surf and @out_buf must be NULL on entry.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}
/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			  res->func->backup_placement,
			  interruptible, false,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}
/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 * @dirtying:       Transfer dirty regions.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf,
				    bool dirtying)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			vmw_resource_mob_attach(res);
	}

	/*
	 * Handle the case where the backup mob is marked coherent but
	 * the resource isn't.
	 */
	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
	    !res->coherent) {
		if (res->backup->dirty && !res->dirty) {
			ret = func->dirty_alloc(res);
			if (ret)
				return ret;
		} else if (!res->backup->dirty && res->dirty) {
			func->dirty_free(res);
		}
	}

	/*
	 * Transfer the dirty regions to the resource and update
	 * the resource.
	 */
	if (res->dirty) {
		if (dirtying && !res->res_dirty) {
			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
			pgoff_t end = __KERNEL_DIV_ROUND_UP
				(res->backup_offset + res->backup_size,
				 PAGE_SIZE);

			vmw_bo_dirty_unmap(res->backup, start, end);
		}

		vmw_bo_dirty_transfer_to_res(res);
		return func->dirty_sync(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}
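/*
 * Caller-side contract (a sketch): on -EBUSY the caller is expected to
 * free device resources and retry, which is exactly what the eviction
 * loop in vmw_resource_validate() below does:
 *
 *	do {
 *		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
 *		if (ret != -EBUSY)
 *			break;
 *		// evict one LRU resource of the same type, then retry
 *	} while (1);
 */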
/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->backup);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);

			/*
			 * The validation code should already have added a
			 * dirty tracker here.
			 */
			WARN_ON(res->coherent && !new_backup->dirty);

			vmw_resource_mob_attach(res);
		} else {
			res->backup = NULL;
		}
	} else if (switch_backup && res->coherent) {
		vmw_bo_dirty_release(res->backup);
	}

	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}
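/*
 * Command-submission lifecycle (a sketch, error handling elided): a
 * resource is reserved, validated, referenced by queued commands and
 * then unreserved, which puts it back on the eviction LRU:
 *
 *	ret = vmw_resource_reserve(res, intr, false);	// off the LRU
 *	ret = vmw_resource_validate(res, intr, true);	// visible to device
 *	// ... queue device commands referencing res->id ...
 *	vmw_resource_unreserve(res, false, false, false, NULL, 0);
 */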
/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}
/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}
/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}
/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}
/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 * @dirtying: Pending GPU operation will dirty the resource
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr,
			  bool dirtying)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}
/**
 * vmw_resource_unbind_list
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->base.base.resv);
	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
		struct rb_node *node = vbo->res_tree.rb_node;
		struct vmw_resource *res =
			container_of(node, struct vmw_resource, mob_node);

		if (!WARN_ON_ONCE(!res->func->unbind))
			(void) res->func->unbind(res, res->res_dirty, &val_buf);

		res->backup_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}
/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}
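/*
 * The reserve/fill/commit sequence above is the usual way device commands
 * are emitted in this driver (a sketch; CMD_ID and CmdBody stand in for a
 * real command id and body type):
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		CmdBody body;
 *	} *cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
 *
 *	cmd->header.id = CMD_ID;
 *	cmd->header.size = sizeof(cmd->body);
 *	// ... fill cmd->body ...
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */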
/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_resource *mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}
/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}
/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait-lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}
/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->base.pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible, true);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}
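/*
 * Pin references nest; balanced usage (a sketch, assuming a caller that
 * needs the resource id to stay stable, for example while scanned out):
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	// ... res cannot be evicted and res->id stays valid ...
 *	vmw_resource_unpin(res);
 */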
/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}
/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}
/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}
/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are ok to prefault
 */
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault)
{
	struct rb_node *cur = vbo->res_tree.rb_node;
	struct vmw_resource *found = NULL;
	unsigned long res_start = start << PAGE_SHIFT;
	unsigned long res_end = end << PAGE_SHIFT;
	unsigned long last_cleaned = 0;

	/*
	 * Find the resource with lowest backup_offset that intersects the
	 * range.
	 */
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->backup_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->backup_offset + cur_res->backup_size <=
			   res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res;
			cur = cur->rb_left;
			/* Continue to look for resources with lower offsets */
		}
	}

	/*
	 * In order of increasing backup_offset, clean dirty resources
	 * intersecting the range.
	 */
	while (found) {
		if (found->res_dirty) {
			int ret;

			if (!found->func->clean)
				return -EINVAL;

			ret = found->func->clean(found);
			if (ret)
				return ret;

			found->res_dirty = false;
		}
		last_cleaned = found->backup_offset + found->backup_size;
		cur = rb_next(&found->mob_node);
		if (!cur)
			break;

		found = container_of(cur, struct vmw_resource, mob_node);
		if (found->backup_offset >= res_end)
			break;
	}

	/*
	 * Set number of pages allowed prefaulting and fence the buffer object
	 */
	*num_prefault = 1;
	if (last_cleaned > res_start) {
		struct ttm_buffer_object *bo = &vbo->base;

		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
						      PAGE_SIZE);
		vmw_bo_fence_single(bo, NULL);
		if (bo->moving)
			dma_fence_put(bo->moving);
		bo->moving = dma_fence_get
			(dma_resv_get_excl(bo->base.resv));
	}

	return 0;
}
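/*
 * Worked example for the prefault computation above (a sketch): with
 * PAGE_SIZE == 4096, res_start == 0x2000 and last_cleaned == 0x5000,
 * __KERNEL_DIV_ROUND_UP(0x3000, 0x1000) == 3, so the first three pages
 * of the range are known clean and safe to prefault.
 */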