/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>
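
/*
 * Unreserve, in reverse list order, every buffer on @list that precedes
 * @entry; used to unwind a partially completed reservation pass.
 */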
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		__ttm_bo_unreserve(bo);
	}
}
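
/*
 * Remove every buffer on @list from the LRU lists and drop the references
 * the LRU lists held; the caller must hold the global lru_lock.
 */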
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		unsigned put_count = ttm_bo_del_from_lru(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);
	}
}
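
/*
 * Undo a successful ttm_eu_reserve_buffers(): put every buffer back on the
 * LRU, unreserve it and, if a ticket was used, release the ticket.
 */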
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */
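
/*
 * A sketch of a typical caller, not part of the original file: val_list
 * (a driver-populated list of struct ttm_validate_buffer) and the helpers
 * my_validate() and my_submit() are hypothetical placeholders for
 * driver-specific code.
 *
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(dups);
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &val_list, true, &dups);
 *	if (ret)
 *		return ret;
 *
 *	ret = my_validate(&val_list);		(hypothetical)
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &val_list);
 *		return ret;
 *	}
 *
 *	fence = my_submit(&val_list);		(hypothetical)
 *	ttm_eu_fence_buffer_objects(&ticket, &val_list, fence);
 */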

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			__ttm_bo_unreserve(bo);

			ret = -EBUSY;

		} else if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;

			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->shared)
				continue;

			ret = reservation_object_reserve_shared(bo->resv);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK && intr) {
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
		} else if (ret == -EDEADLK) {
			ww_mutex_lock_slow(&bo->resv->lock, ticket);
			ret = 0;
		}

		if (!ret && entry->shared)
			ret = reservation_object_reserve_shared(bo->resv);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 * of this.
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
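
/*
 * Attach @fence to every buffer on @list (as a shared or an exclusive
 * fence, depending on how each buffer was reserved), put the buffers back
 * on the LRU, unreserve them and, if a ticket was used, release it.
 */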
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		if (entry->shared)
			reservation_object_add_shared_fence(bo->resv, fence);
		else
			reservation_object_add_excl_fence(bo->resv, fence);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);