// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}
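
/*
 * Example (illustrative only, not part of the driver): a typical
 * reference/unreference pattern around a resource pointer. The caller
 * owns one reference after vmw_resource_reference() and drops it with
 * vmw_resource_unreference(), which also NULLs the pointer.
 *
 *	struct vmw_resource *tmp = vmw_resource_reference(res);
 *
 *	... use tmp ...
 *
 *	vmw_resource_unreference(&tmp);	(tmp is NULL afterwards)
 */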

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle without taking a reference on the base object
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 *
 * If the handle can't be found, ERR_PTR(-ESRCH) is returned; if it is
 * associated with an incorrect resource type, ERR_PTR(-EINVAL) is returned.
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv
				      *converter)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return ERR_PTR(-ESRCH);

	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
		ttm_base_object_noref_release();
		return ERR_PTR(-EINVAL);
	}

	return converter->base_obj_to_res(base);
}

/**
 * Helper function that looks up either a surface or a buffer object.
 *
 * The pointers pointed to by out_surf and out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}
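
/*
 * Example (illustrative only): looking up a handle that may name either a
 * surface or a buffer object. Both output pointers must be NULL on entry;
 * exactly one of them is set and referenced on success.
 *
 *	struct vmw_surface *srf = NULL;
 *	struct vmw_buffer_object *buf = NULL;
 *	int ret;
 *
 *	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &srf, &buf);
 *	if (ret)
 *		return ret;
 *	... either srf or buf is now non-NULL ...
 */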

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			  res->func->backup_placement,
			  interruptible,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @switch_backup: Backup buffer has been switched.
 * @new_backup: Pointer to new backup buffer if command submission
 * switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);
			lockdep_assert_held(&new_backup->base.resv->lock.base);
			list_add_tail(&res->mob_head, &new_backup->res_list);
		} else {
			res->backup = NULL;
		}
	}
	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 * for a resource and in that case, allocate
 * one, reserve and validate it.
 *
 * @ticket: The ww acquire context to use, or NULL if trylocking.
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 * backup buffer
 *
 * @ticket: The ww acquire ctx used for reservation.
 * @val_buf: Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @ticket: The ww acquire ticket to use, or NULL if trylocking.
 * @res: The resource to evict.
 * @interruptible: Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}
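
/*
 * Example (illustrative only): a simplified sketch of the
 * reserve/validate/unreserve cycle a caller is expected to follow.
 * vmw_resource_reserve() takes the resource off the LRU list,
 * vmw_resource_validate() makes it visible to the device (evicting other
 * resources of the same type on -EBUSY), and vmw_resource_unreserve()
 * puts it back on the LRU list. Real callers also reserve the backup
 * buffer object before validating; see vmw_resource_pin() below for a
 * complete example.
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (ret)
 *		return ret;
 *	ret = vmw_resource_validate(res, true);
 *	vmw_resource_unreserve(res, false, NULL, 0);
 */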

/**
 * vmw_resource_unbind_list
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct vmw_resource *res, *next;
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.num_shared = 0
	};

	lockdep_assert_held(&vbo->base.resv->lock.base);
	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
		if (!res->func->unbind)
			continue;

		(void) res->func->unbind(res, true, &val_buf);
		res->backup_dirty = true;
		res->res_dirty = false;
		list_del_init(&res->mob_head);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv = dx_query_ctx->dev_priv;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for "
			  "query MOB read back.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}
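
/*
 * Example (illustrative only): pinning a resource for a long-lived user
 * such as a scanout surface, then releasing the pin. While pinned, the
 * resource cannot be evicted and its device id stays stable.
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	... resource stays resident ...
 *	vmw_resource_unpin(res);
 */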

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}