/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>
/* Undo the reservations on all list entries. Caller must hold glob->lru_lock. */
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		entry->reserved = false;
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
	}
}
/* Take reserved buffers off the LRU lists. Caller must hold glob->lru_lock. */
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}
/* Drop the LRU references accumulated by ttm_eu_del_from_lru_locked(). */
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}
static int ttm_eu_wait_unreserved_locked(struct list_head *list,
					 struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ret = ttm_bo_wait_unreserved(bo, true);
	spin_lock(&glob->lru_lock);
	if (unlikely(ret != 0))
		ttm_eu_backoff_reservation_locked(list);
	return ret;
}
void ttm_eu_backoff_reservation(struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */
int ttm_eu_reserve_buffers(struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;
	uint32_t val_seq;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

retry:
	spin_lock(&glob->lru_lock);
	val_seq = entry->bo->bdev->val_seq++;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

retry_this_bo:
		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			/* Someone else holds the reservation: drop the
			 * LRU lock while waiting for it to be released,
			 * then retry this buffer. */
			ret = ttm_eu_wait_unreserved_locked(list, bo);
			if (unlikely(ret != 0)) {
				spin_unlock(&glob->lru_lock);
				ttm_eu_list_ref_sub(list);
				return ret;
			}
			goto retry_this_bo;
		case -EAGAIN:
			/* Deadlock avoidance: the buffer is reserved by a
			 * validator that takes precedence (see the comment
			 * above). Release everything we hold, wait for the
			 * buffer and start over. */
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_wait_unreserved(bo, true);
			if (unlikely(ret != 0))
				return ret;
			goto retry;
		default:
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			return ret;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			/* The buffer is marked for CPU access: back off
			 * and wait for CPU access to finish, then start
			 * over. */
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_wait_cpu(bo, false);
			if (ret)
				return ret;
			goto retry;
		}
	}

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&bdev->fence_lock);
	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		bo->sync_obj_arg = entry->new_sync_obj_arg;
		ttm_bo_unreserve_locked(bo);
		entry->reserved = false;
	}
	spin_unlock(&glob->lru_lock);
	spin_unlock(&bdev->fence_lock);

	/* Drop the old fence references outside of the locks. */
	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);