// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
        return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        spin_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        spin_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, NULL);
                if (!list_empty(&res->mob_head) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        val_buf.shared = false;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
                vmw_bo_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL)) {
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_res_list_kill(&res->binding_head);
                mutex_unlock(&dev_priv->binding_mutex);
                res->hw_destroy(res);
        }

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        spin_lock(&dev_priv->resource_lock);
        if (id != -1)
                idr_remove(idr, id);
        spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;

        *p_res = NULL;
        kref_put(&res->kref, vmw_resource_release);
}
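
/*
 * Illustrative sketch (not part of the driver): the reference pairing the
 * helpers above are meant for. Variable names are made up and error
 * handling is elided.
 *
 *      struct vmw_resource *tmp = vmw_resource_reference(res);
 *
 *      ... tmp cannot be freed while this reference is held ...
 *
 *      vmw_resource_unreference(&tmp);  // also sets tmp to NULL
 */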

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        spin_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        spin_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
        INIT_LIST_HEAD(&res->binding_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}
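
/*
 * Illustrative sketch (not part of the driver): how a resource implementation
 * might initialize its embedded struct vmw_resource. The destructor, function
 * table and hw_destroy callback names below are hypothetical.
 *
 *      ret = vmw_resource_init(dev_priv, &my_res->res, true,
 *                              my_res_free, &my_res_func);
 *      if (unlikely(ret != 0)) {
 *              my_res_free(&my_res->res);
 *              return ret;
 *      }
 *      my_res->res.hw_destroy = my_hw_destroy;
 *
 * With delay_id == true, the device id is allocated at first validation
 * rather than here.
 */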

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);
        kref_get(&res->kref);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}
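
/*
 * Illustrative sketch (not part of the driver): looking up a resource from a
 * user-space handle and dropping the reference when done. The surrounding
 * ioctl plumbing is elided; user_surface_converter stands in for whatever
 * converter the caller actually needs.
 *
 *      struct vmw_resource *res;
 *      int ret;
 *
 *      ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *                                            user_surface_converter, &res);
 *      if (unlikely(ret != 0))
 *              return ret;
 *
 *      ... operate on the refcounted resource ...
 *
 *      vmw_resource_unreference(&res);
 */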

/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks, without taking
 * a reference
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 *
 * If the handle can't be found, ERR_PTR(-ESRCH) is returned. If it is
 * associated with an incorrect resource type, ERR_PTR(-EINVAL) is returned.
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
                                      struct ttm_object_file *tfile,
                                      uint32_t handle,
                                      const struct vmw_user_resource_conv
                                      *converter)
{
        struct ttm_base_object *base;

        base = ttm_base_object_noref_lookup(tfile, handle);
        if (!base)
                return ERR_PTR(-ESRCH);

        if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
                ttm_base_object_noref_release();
                return ERR_PTR(-EINVAL);
        }

        return converter->base_obj_to_res(base);
}

/**
 * Helper function that looks up either a surface or a buffer object.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_buffer_object **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
        return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_buffer_object *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(!backup))
                return -ENOMEM;

        ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
                          res->func->backup_placement,
                          interruptible,
                          &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                goto out_no_bo;

        res->backup = backup;

out_no_bo:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && list_empty(&res->mob_head) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        list_add_tail(&res->mob_head, &res->backup->res_list);
        }

        /*
         * Only do this on write operations, and move to
         * vmw_resource_unreserve if it can be called after
         * backup buffers have been unreserved. Otherwise
         * sort out locking.
         */
        res->res_dirty = true;

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            bool switch_backup,
                            struct vmw_buffer_object *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (switch_backup && new_backup != res->backup) {
                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
                        vmw_bo_unreference(&res->backup);
                }

                if (new_backup) {
                        res->backup = vmw_bo_reference(new_backup);
                        lockdep_assert_held(&new_backup->base.resv->lock.base);
                        list_add_tail(&res->mob_head, &new_backup->res_list);
                } else {
                        res->backup = NULL;
                }
        }
        if (switch_backup)
                res->backup_offset = new_backup_offset;

        if (!res->func->may_evict || res->id == -1 || res->pin_count)
                return;

        spin_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
                          struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        val_buf->shared = false;
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && list_empty(&res->mob_head))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              &ctx);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
        ttm_bo_unref(&val_buf->bo);
        if (backup_dirty)
                vmw_bo_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission.
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps should be performed while interruptible.
 * @no_backup:      Don't allocate a backup buffer even if one is needed.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
                         bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        spin_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to allocate a backup buffer "
                                  "of size %lu bytes\n",
                                  (unsigned long) res->backup_size);
                        return ret;
                }
        }

        return 0;
}
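
/*
 * Illustrative sketch (not part of the driver): the typical
 * reserve -> validate -> unreserve sequence around command submission,
 * leaving the backup buffer unchanged (no switch). Error handling and the
 * reservation of the backup buffer itself are elided.
 *
 *      ret = vmw_resource_reserve(res, true, false);
 *      if (ret)
 *              return ret;
 *
 *      ret = vmw_resource_validate(res, true);
 *      ...
 *      vmw_resource_unreserve(res, false, NULL, 0);
 */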

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
                                 struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(ticket, &val_list);
        ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
                                 struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        val_buf.shared = false;
        ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || !list_empty(&res->mob_head)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                list_del_init(&res->mob_head);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(ticket, &val_buf);

        return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (!res->func->create)
                return 0;

        val_buf.bo = NULL;
        val_buf.shared = false;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf);
                if (likely(ret != -EBUSY))
                        break;

                spin_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        spin_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                spin_unlock(&dev_priv->resource_lock);

                /* Trylock backup buffers with a NULL ticket. */
                ret = vmw_resource_do_evict(NULL, evict_res, intr);
                if (unlikely(ret != 0)) {
                        spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        spin_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
                vmw_bo_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}

/**
 * vmw_resource_unbind_list
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
        struct vmw_resource *res, *next;
        struct ttm_validate_buffer val_buf = {
                .bo = &vbo->base,
                .shared = false
        };

        lockdep_assert_held(&vbo->base.resv->lock.base);
        list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
                if (!res->func->unbind)
                        continue;

                (void) res->func->unbind(res, true, &val_buf);
                res->backup_dirty = true;
                res->res_dirty = false;
                list_del_init(&res->mob_head);
        }

        (void) ttm_bo_wait(&vbo->base, false, false);
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
        struct vmw_resource *dx_query_ctx;
        struct vmw_private *dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackAllQuery body;
        } *cmd;

        /* No query bound, so do nothing */
        if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
                return 0;

        dx_query_ctx = dx_query_mob->dx_query_ctx;
        dev_priv = dx_query_ctx->dev_priv;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for "
                          "query MOB read back.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = dx_query_ctx->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        /* Triggers a rebind the next time affected context is bound */
        dx_query_mob->dx_query_ctx = NULL;

        return 0;
}
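
/*
 * Illustrative sketch (not part of the driver): the reserve / fill / commit
 * pattern used above when submitting an SVGA command, with a made-up command
 * structure standing in for the real one.
 *
 *      cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_id);
 *      if (unlikely(cmd == NULL))
 *              return -ENOMEM;
 *
 *      cmd->header.id = <command id>;
 *      cmd->header.size = sizeof(cmd->body);
 *      ... fill in cmd->body ...
 *
 *      vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */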

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
{
        struct vmw_buffer_object *dx_query_mob;
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv;

        dev_priv = container_of(bdev, struct vmw_private, bdev);

        mutex_lock(&dev_priv->binding_mutex);

        dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
        if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
                mutex_unlock(&dev_priv->binding_mutex);
                return;
        }

        /* If BO is being moved from MOB to system memory */
        if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
                struct vmw_fence_obj *fence;

                (void) vmw_query_readback_all(dx_query_mob);
                mutex_unlock(&dev_priv->binding_mutex);

                /* Create a fence and attach the BO to it */
                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                vmw_bo_fence_single(bo, fence);

                if (fence != NULL)
                        vmw_fence_obj_unreference(&fence);

                (void) ttm_bo_wait(bo, false, false);
        } else
                mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;
        struct ww_acquire_ctx ticket;

        do {
                spin_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                spin_unlock(&dev_priv->resource_lock);

                /* Wait lock backup buffers with a ticket. */
                ret = vmw_resource_do_evict(&ticket, evict_res, false);
                if (unlikely(ret != 0)) {
                        spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        spin_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        mutex_lock(&dev_priv->cmdbuf_mutex);
        ret = vmw_resource_reserve(res, interruptible, false);
        if (ret)
                goto out_no_reserve;

        if (res->pin_count == 0) {
                struct vmw_buffer_object *vbo = NULL;

                if (res->backup) {
                        vbo = res->backup;

                        ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
                        if (!vbo->pin_count) {
                                ret = ttm_bo_validate
                                        (&vbo->base,
                                         res->func->backup_placement,
                                         &ctx);
                                if (ret) {
                                        ttm_bo_unreserve(&vbo->base);
                                        goto out_no_validate;
                                }
                        }

                        /* Do we really need to pin the MOB as well? */
                        vmw_bo_pin_reserved(vbo, true);
                }
                ret = vmw_resource_validate(res, interruptible);
                if (vbo)
                        ttm_bo_unreserve(&vbo->base);
                if (ret)
                        goto out_no_validate;
                res->pin_count++;
        }

out_no_validate:
        vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}
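
/*
 * Illustrative sketch (not part of the driver): pin/unpin pairing for a
 * resource whose id and backing MOB must stay stable for a while, e.g. a
 * displayed surface. Error handling is elided.
 *
 *      ret = vmw_resource_pin(res, true);
 *      if (ret)
 *              return ret;
 *
 *      ... the resource cannot be evicted here ...
 *
 *      vmw_resource_unpin(res);
 */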

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        (void) ttm_read_lock(&dev_priv->reservation_sem, false);
        mutex_lock(&dev_priv->cmdbuf_mutex);

        ret = vmw_resource_reserve(res, false, true);
        WARN_ON(ret);

        WARN_ON(res->pin_count == 0);
        if (--res->pin_count == 0 && res->backup) {
                struct vmw_buffer_object *vbo = res->backup;

                (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
                vmw_bo_pin_reserved(vbo, false);
                ttm_bo_unreserve(&vbo->base);
        }

        vmw_resource_unreserve(res, false, NULL, 0UL);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
        return res->func->res_type;
}