/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

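/*
 * Unlock, in reverse list order, every buffer that was reserved before
 * @entry; @entry itself is left untouched.
 */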
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

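/*
 * ttm_eu_backoff_reservation - undo a successful ttm_eu_reserve_buffers():
 * move every buffer on @list to the tail of its LRU list, drop its
 * reservation lock and, if a @ticket was used, finish the acquire context.
 */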
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	spin_lock(&ttm_bo_glob.lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&ttm_bo_glob.lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->num_shared)
				continue;

			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK) {
			if (intr) {
				ret = dma_resv_lock_slow_interruptible(bo->base.resv,
								       ticket);
			} else {
				dma_resv_lock_slow(bo->base.resv, ticket);
				ret = 0;
			}
		}

		if (!ret && entry->num_shared)
			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

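/*
 * ttm_eu_fence_buffer_objects - attach @fence to every buffer on @list,
 * either as a shared or as an exclusive fence depending on num_shared,
 * move the buffers to the tail of their LRU lists, unlock them and, if a
 * @ticket was used, finish the acquire context.
 */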
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	spin_lock(&ttm_bo_glob.lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->num_shared)
			dma_resv_add_shared_fence(bo->base.resv, fence);
		else
			dma_resv_add_excl_fence(bo->base.resv, fence);
		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&ttm_bo_glob.lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
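
/*
 * Typical execbuf flow built on the helpers above. This is an illustrative
 * sketch only, not part of the original file: "bo" and the helper
 * driver_validate_and_submit() are assumptions standing in for
 * driver-specific code. Setting num_shared to 1 asks
 * ttm_eu_reserve_buffers() to pre-allocate one shared fence slot on the
 * buffer's reservation object before the fence is added later.
 *
 *	struct ww_acquire_ctx ticket;
 *	struct ttm_validate_buffer val_buf;
 *	struct dma_fence *fence;
 *	LIST_HEAD(list);
 *	LIST_HEAD(dups);
 *	int ret;
 *
 *	val_buf.bo = bo;
 *	val_buf.num_shared = 1;
 *	list_add(&val_buf.head, &list);
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, &dups);
 *	if (ret)
 *		return ret;
 *
 *	ret = driver_validate_and_submit(&list, &fence);
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &list);
 *		return ret;
 *	}
 *
 *	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
 */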