/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct fence		*fence;
};
/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
		sync->semaphores[i] = NULL;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		sync->sync_to[i] = NULL;

	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}
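
/*
 * A minimal usage sketch of the sync object lifecycle, assuming the caller
 * already holds a locked reservation object and a target ring; the names
 * resv, ring and cs_fence are hypothetical and only illustrate the call
 * order of the functions defined in this file:
 *
 *	struct amdgpu_sync sync;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_FENCE_OWNER_UNDEFINED);
 *	if (!r)
 *		r = amdgpu_sync_rings(&sync, ring);
 *	amdgpu_sync_free(adev, &sync, cs_fence);
 */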
/* Check if the fence was issued by the given device. */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
{
	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (a_fence)
		return a_fence->ring->adev == adev;

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}
/* Check if the fence belongs to the given owner. */
static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
{
	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence)
		return s_fence->owner == owner;
	if (a_fence)
		return a_fence->owner == owner;
	return false;
}
/* Keep the later of the two fences and drop the reference to the other one. */
static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
{
	if (*keep && fence_is_later(*keep, fence))
		return;

	fence_put(*keep);
	*keep = fence_get(fence);
}
/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device
 * @sync: sync object to add the fence to
 * @f: fence to sync to
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f)
{
	struct amdgpu_sync_entry *e;
	struct amdgpu_fence *fence;

	if (!f)
		return 0;

	if (amdgpu_sync_same_dev(adev, f) &&
	    amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM))
		amdgpu_sync_keep_later(&sync->last_vm_update, f);

	fence = to_amdgpu_fence(f);
	if (!fence || fence->ring->adev != adev) {
		/* Foreign fence: track it in the hash table, one entry
		 * per fence context, keeping only the latest fence.
		 */
		hash_for_each_possible(sync->fences, e, node, f->context) {
			if (unlikely(e->fence->context != f->context))
				continue;

			amdgpu_sync_keep_later(&e->fence, f);
			return 0;
		}

		e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
		if (!e)
			return -ENOMEM;

		hash_add(sync->fences, &e->node, f->context);
		e->fence = fence_get(f);
		return 0;
	}

	amdgpu_sync_keep_later(&sync->sync_to[fence->ring->idx], f);

	return 0;
}
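
/*
 * Sketch of a typical call, e.g. making a submission wait on the fence of
 * a previous job; the names job_sync and prev_fence are hypothetical:
 *
 *	r = amdgpu_sync_fence(adev, &job_sync, prev_fence);
 *	if (r)
 *		return r;
 */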
/* Return the owner of the fence, or AMDGPU_FENCE_OWNER_UNDEFINED. */
static void *amdgpu_sync_get_owner(struct fence *f)
{
	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence)
		return s_fence->owner;
	else if (a_fence)
		return a_fence->owner;
	return AMDGPU_FENCE_OWNER_UNDEFINED;
}
/**
 * amdgpu_sync_resv - use the semaphores to sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @owner: fence owner; fences from the same defined owner are not added
 *
 * Sync to the fence using the semaphore objects
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner)
{
	struct reservation_object_list *flist;
	struct fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			fence_owner = amdgpu_sync_get_owner(f);
			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fence from the same owner as
			 * long as it isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    fence_owner == owner)
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f);
		if (r)
			break;
	}
	return r;
}
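
/*
 * A minimal call sketch, assuming the caller holds the reservation lock;
 * "bo" and "sync" are hypothetical local names:
 *
 *	r = amdgpu_sync_resv(adev, &sync, bo->tbo.resv,
 *			     AMDGPU_FENCE_OWNER_UNDEFINED);
 *	if (r)
 *		return r;
 */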
/* Get and remove the next not-yet-signaled fence from the sync object. */
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {

		f = e->fence;

		hash_del(&e->node);
		kfree(e);

		if (!fence_is_signaled(f))
			return f;

		fence_put(f);
	}
	return NULL;
}
int amdgpu_sync_wait(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = fence_wait(e->fence, false);
		if (r)
			return r;

		hash_del(&e->node);
		fence_put(e->fence);
		kfree(e);
	}

	if (amdgpu_enable_semaphores)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct fence *fence = sync->sync_to[i];
		if (!fence)
			continue;

		r = fence_wait(fence, false);
		if (r)
			return r;
	}

	return 0;
}
/**
 * amdgpu_sync_rings - sync ring to all registered fences
 *
 * @sync: sync object to use
 * @ring: ring that needs sync
 *
 * Ensure that all registered fences are signaled before letting
 * the ring continue. The caller must hold the ring lock.
 */
int amdgpu_sync_rings(struct amdgpu_sync *sync,
		      struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned count = 0;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *other = adev->rings[i];
		struct amdgpu_semaphore *semaphore;
		struct amdgpu_fence *fence;

		if (!sync->sync_to[i])
			continue;

		fence = to_amdgpu_fence(sync->sync_to[i]);

		/* check if we really need to sync */
		if (!amdgpu_fence_need_sync(fence, ring))
			continue;

		/* prevent GPU deadlocks */
		if (!other->ready) {
			dev_err(adev->dev, "Syncing to a disabled ring!");
			return -EINVAL;
		}

		if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
			r = fence_wait(&fence->base, true);
			if (r)
				return r;
			continue;
		}

		if (count >= AMDGPU_NUM_SYNCS) {
			/* not enough room, wait manually */
			r = fence_wait(&fence->base, false);
			if (r)
				return r;
			continue;
		}

		r = amdgpu_semaphore_create(adev, &semaphore);
		if (r)
			return r;

		sync->semaphores[count++] = semaphore;

		/* allocate enough space for sync command */
		r = amdgpu_ring_alloc(other, 16);
		if (r)
			return r;

		/* emit the signal semaphore */
		if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
			/* signaling wasn't successful, wait manually */
			amdgpu_ring_undo(other);
			r = fence_wait(&fence->base, false);
			if (r)
				return r;
			continue;
		}

		/* we assume the caller has already allocated space on the waiter's ring */
		if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
			/* waiting wasn't successful, wait manually */
			amdgpu_ring_undo(other);
			r = fence_wait(&fence->base, false);
			if (r)
				return r;
			continue;
		}

		amdgpu_ring_commit(other);
		amdgpu_fence_note_sync(fence, ring);
	}

	return 0;
}
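
/*
 * Call sketch: with the target ring locked and space reserved by the
 * caller (per the comment above), syncing before IB emission could look
 * like this ("ib" is a hypothetical amdgpu_ib with an embedded sync object):
 *
 *	r = amdgpu_sync_rings(&ib->sync, ring);
 *	if (r)
 *		return r;
 */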
/**
 * amdgpu_sync_free - free the sync object
 *
 * @adev: amdgpu_device pointer
 * @sync: sync object to use
 * @fence: fence to use for the free
 *
 * Free the sync object by freeing all semaphores in it.
 */
void amdgpu_sync_free(struct amdgpu_device *adev,
		      struct amdgpu_sync *sync,
		      struct fence *fence)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		fence_put(e->fence);
		kfree(e);
	}

	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
		amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		fence_put(sync->sync_to[i]);

	fence_put(sync->last_vm_update);
}