/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

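/* Each sync object keeps at most one fence per fence context, stored in a
 * small hashtable keyed by the fence context number; newer fences replace
 * older ones from the same context.
 */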
struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct dma_fence	*fence;
	bool			explicit;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}

/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
				 struct dma_fence *f)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	if (s_fence)
		return s_fence->owner;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one is
 * later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
				   struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;

	dma_fence_put(*keep);
	*keep = dma_fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 * @explicit: whether this is an explicit dependency
 *
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, bool explicit)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);

		/* Preserve the explicit flag so we don't lose the pipeline sync */
		e->explicit |= explicit;

		return true;
	}
	return false;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @sync: sync object to add the fence to
 * @f: fence to sync to
 *
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct dma_fence *f, bool explicit)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	if (amdgpu_sync_same_dev(adev, f) &&
	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
		amdgpu_sync_keep_later(&sync->last_vm_update, f);

	if (amdgpu_sync_add_later(sync, f, explicit))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->explicit = explicit;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @sync: sync object to add fences from the reservation object to
 * @resv: reservation object with embedded fences
 * @explicit_sync: true if we should only sync to the exclusive fence
 *
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner, bool explicit_sync)
{
	struct reservation_object_list *flist;
	struct dma_fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f, false);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			fence_owner = amdgpu_sync_get_owner(f);
			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fences from the same owner and explicit ones
			 * as long as the owner isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    (fence_owner == owner || explicit_sync))
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f, false);
		if (r)
			break;
	}
	return r;
}

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for the test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (dma_fence_is_signaled(f)) {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}

		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (dma_fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		return f;
	}

	return NULL;
}

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 * @explicit: true if the next fence is an explicit dependency
 *
 * Gets and removes the next fence from the sync object that is not signaled
 * yet.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;
		if (explicit)
			*explicit = e->explicit;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!dma_fence_is_signaled(f))
			return f;

		dma_fence_put(f);
	}
	return NULL;
}

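/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object to use
 * @intr: if we should wait interruptible
 *
 * Wait for every fence collected in the sync object and drop each entry once
 * it has signaled.
 */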
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	dma_fence_put(sync->last_vm_update);
}

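/*
 * Typical lifecycle of a sync object, as a minimal sketch (illustrative only,
 * not taken from a real caller; "bo" and "owner" are hypothetical and error
 * handling is omitted):
 *
 *	struct amdgpu_sync sync;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner, false);
 *	r = amdgpu_sync_wait(&sync, true);
 *	amdgpu_sync_free(&sync);
 */
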
/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}
