/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>
/* Undo the reservations taken so far: walk backwards from @entry, the
 * buffer that could not be reserved, and unlock every buffer reserved
 * before it.
 */
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		reservation_object_unlock(bo->resv);
	}
}
/* Take every buffer on @list off the LRU lists; caller holds lru_lock. */
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_del_from_lru(bo);
	}
}
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_add_to_lru(bo);
		reservation_object_unlock(bo->resv);
	}
	spin_unlock(&glob->lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders. (A usage sketch follows the function
 * body below.)
 */
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			/* Buffer is busy for CPU access; give it up. */
			reservation_object_unlock(bo->resv);

			ret = -EBUSY;

		} else if (ret == -EALREADY && dups) {
			/* Already reserved under this ticket: move the
			 * duplicate onto @dups and continue iterating
			 * from the previous entry.
			 */
			struct ttm_validate_buffer *safe = entry;

			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->shared)
				continue;

			ret = reservation_object_reserve_shared(bo->resv);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK && intr) {
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
		} else if (ret == -EDEADLK) {
			ww_mutex_lock_slow(&bo->resv->lock, ticket);
			ret = 0;
		}

		if (!ret && entry->shared)
			ret = reservation_object_reserve_shared(bo->resv);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
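
/*
 * A minimal usage sketch, not part of TTM itself: how a driver might
 * drive the reserve/validate/fence cycle. The names val_list,
 * drv_validate() and job_fence are hypothetical, used only to
 * illustrate the calling convention:
 *
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(dups);
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &val_list, true, &dups);
 *	if (ret)
 *		return ret;
 *
 *	ret = drv_validate(&val_list);
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &val_list);
 *		return ret;
 *	}
 *
 *	... submit work and obtain job_fence ...
 *
 *	ttm_eu_fence_buffer_objects(&ticket, &val_list, job_fence);
 *
 * Note that -EDEADLK from a lock inversion never reaches the caller:
 * the loop above drops every reservation, takes the contended lock on
 * the ww_mutex slow path, and starts over, which is what keeps
 * cross-ordered reservations from deadlocking.
 */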
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		if (entry->shared)
			reservation_object_add_shared_fence(bo->resv, fence);
		else
			reservation_object_add_excl_fence(bo->resv, fence);
		ttm_bo_add_to_lru(bo);
		reservation_object_unlock(bo->resv);
	}
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
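
/*
 * Sketch of how an entry is typically prepared before reservation so
 * that the fencing loop above picks the right slot; drv_bo, wants_write
 * and val_list are hypothetical driver-side names:
 *
 *	struct ttm_validate_buffer val;
 *
 *	val.bo = &drv_bo->tbo;
 *	val.shared = !wants_write;	(read access only needs a shared fence)
 *	list_add(&val.head, &val_list);
 */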