drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

struct amdgpu_sync_entry {
        struct hlist_node       node;
        struct dma_fence        *fence;
        bool                    explicit;
};

static struct kmem_cache *amdgpu_sync_slab;
/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
        hash_init(sync->fences);
        sync->last_vm_update = NULL;
}
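
/*
 * Editor's sketch, not part of the driver: the typical lifecycle of an
 * amdgpu_sync object built from the functions in this file. The dependency
 * fence, the interruptible wait and the helper name are assumptions for
 * illustration only; the real callers live in the CS, VM and KFD code.
 */
static int __maybe_unused amdgpu_sync_lifecycle_sketch(struct dma_fence *dep)
{
        struct amdgpu_sync sync;
        int r;

        amdgpu_sync_create(&sync);

        /* Remember an implicit dependency; a NULL fence is silently ignored. */
        r = amdgpu_sync_fence(&sync, dep, false);
        if (r)
                goto out_free;

        /* Block (interruptibly) until every remembered fence has signaled. */
        r = amdgpu_sync_wait(&sync, true);

out_free:
        amdgpu_sync_free(&sync);
        return r;
}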
/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
                                 struct dma_fence *f)
{
        struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

        if (s_fence) {
                struct amdgpu_ring *ring;

                ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
                return ring->adev == adev;
        }

        return false;
}
/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
        struct drm_sched_fence *s_fence;
        struct amdgpu_amdkfd_fence *kfd_fence;

        if (!f)
                return AMDGPU_FENCE_OWNER_UNDEFINED;

        s_fence = to_drm_sched_fence(f);
        if (s_fence)
                return s_fence->owner;

        kfd_fence = to_amdgpu_amdkfd_fence(f);
        if (kfd_fence)
                return AMDGPU_FENCE_OWNER_KFD;

        return AMDGPU_FENCE_OWNER_UNDEFINED;
}
/**
 * amdgpu_sync_keep_later - keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one is
 * later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
                                   struct dma_fence *fence)
{
        if (*keep && dma_fence_is_later(*keep, fence))
                return;

        dma_fence_put(*keep);
        *keep = dma_fence_get(fence);
}
/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 * @explicit: whether this is an explicit dependency
 *
 * Tries to add the fence to an existing hash entry. Returns true when an entry
 * was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f,
                                  bool explicit)
{
        struct amdgpu_sync_entry *e;

        hash_for_each_possible(sync->fences, e, node, f->context) {
                if (unlikely(e->fence->context != f->context))
                        continue;

                amdgpu_sync_keep_later(&e->fence, f);

                /* Preserve the explicit flag so we don't lose the pipeline sync */
                e->explicit |= explicit;

                return true;
        }
        return false;
}
/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @sync: sync object to add fence to
 * @f: fence to sync to
 * @explicit: if this is an explicit dependency
 *
 * Add the fence to the sync object.
 */
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
                      bool explicit)
{
        struct amdgpu_sync_entry *e;

        if (!f)
                return 0;

        if (amdgpu_sync_add_later(sync, f, explicit))
                return 0;

        e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->explicit = explicit;

        hash_add(sync->fences, &e->node, f->context);
        e->fence = dma_fence_get(f);
        return 0;
}
/**
 * amdgpu_sync_vm_fence - remember to sync to this VM fence
 *
 * @sync: sync object to add fence to
 * @fence: the VM fence to add
 *
 * Add the fence to the sync object and remember it as VM update.
 */
int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
{
        if (!fence)
                return 0;

        amdgpu_sync_keep_later(&sync->last_vm_update, fence);
        return amdgpu_sync_fence(sync, fence, false);
}
/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @owner: owner to check the fences against
 * @explicit_sync: true if we should only sync to the exclusive fence
 *
 * Sync to the fences of the reservation object.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
                     struct amdgpu_sync *sync,
                     struct dma_resv *resv,
                     void *owner, bool explicit_sync)
{
        struct dma_resv_list *flist;
        struct dma_fence *f;
        void *fence_owner;
        unsigned i;
        int r = 0;

        if (resv == NULL)
                return -EINVAL;

        /* always sync to the exclusive fence */
        f = dma_resv_get_excl(resv);
        r = amdgpu_sync_fence(sync, f, false);

        flist = dma_resv_get_list(resv);
        if (!flist || r)
                return r;

        for (i = 0; i < flist->shared_count; ++i) {
                f = rcu_dereference_protected(flist->shared[i],
                                              dma_resv_held(resv));
                /* We only want to trigger KFD eviction fences on
                 * evict or move jobs. Skip KFD fences otherwise.
                 */
                fence_owner = amdgpu_sync_get_owner(f);
                if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
                    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
                        continue;

                if (amdgpu_sync_same_dev(adev, f)) {
                        /* VM updates only sync with moves but not with user
                         * command submissions or KFD eviction fences
                         */
                        if (owner == AMDGPU_FENCE_OWNER_VM &&
                            fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED)
                                continue;

                        /* Ignore fences from the same owner and explicit ones
                         * as long as the owner isn't undefined.
                         */
                        if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
                            (fence_owner == owner || explicit_sync))
                                continue;
                }

                r = amdgpu_sync_fence(sync, f, false);
                if (r)
                        break;
        }
        return r;
}
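
/*
 * Editor's sketch, not part of the driver: how amdgpu_sync_resv() is meant to
 * be used to pull the fences of a buffer's reservation object into a sync
 * object before new work touches that buffer. The helper name and the choice
 * of an undefined owner are assumptions for illustration only.
 */
static int __maybe_unused amdgpu_sync_resv_sketch(struct amdgpu_device *adev,
                                                  struct amdgpu_sync *sync,
                                                  struct dma_resv *resv)
{
        /*
         * With AMDGPU_FENCE_OWNER_UNDEFINED nothing is filtered out: the
         * exclusive fence and all shared fences of @resv are remembered.
         */
        return amdgpu_sync_resv(adev, sync, resv,
                                AMDGPU_FENCE_OWNER_UNDEFINED, false);
}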
/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for the test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
                                         struct amdgpu_ring *ring)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        int i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                struct dma_fence *f = e->fence;
                struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

                if (dma_fence_is_signaled(f)) {
                        hash_del(&e->node);
                        dma_fence_put(f);
                        kmem_cache_free(amdgpu_sync_slab, e);
                        continue;
                }
                if (ring && s_fence) {
                        /* For fences from the same ring it is sufficient
                         * when they are scheduled.
                         */
                        if (s_fence->sched == &ring->sched) {
                                if (dma_fence_is_signaled(&s_fence->scheduled))
                                        continue;

                                return &s_fence->scheduled;
                        }
                }

                return f;
        }

        return NULL;
}
/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 * @explicit: true if the next fence is explicit
 *
 * Get and remove the next fence from the sync object that is not signaled yet.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        struct dma_fence *f;
        int i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                f = e->fence;
                if (explicit)
                        *explicit = e->explicit;

                hash_del(&e->node);
                kmem_cache_free(amdgpu_sync_slab, e);

                if (!dma_fence_is_signaled(f))
                        return f;

                dma_fence_put(f);
        }
        return NULL;
}
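
/*
 * Editor's sketch, not part of the driver: draining a sync object one fence
 * at a time with amdgpu_sync_get_fence(), roughly what a job dependency
 * handler does. The helper name and the blocking wait are assumptions for
 * illustration only; each returned fence carries a reference that the caller
 * must drop.
 */
static int __maybe_unused amdgpu_sync_drain_sketch(struct amdgpu_sync *sync)
{
        struct dma_fence *fence;
        bool explicit;
        int r;

        while ((fence = amdgpu_sync_get_fence(sync, &explicit))) {
                /* The explicit flag is ignored in this sketch. */
                r = dma_fence_wait(fence, false);
                dma_fence_put(fence);
                if (r)
                        return r;
        }

        return 0;
}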
/**
 * amdgpu_sync_clone - clone a sync object
 *
 * @source: sync object to clone
 * @clone: pointer to destination sync object
 *
 * Adds references to all unsignaled fences in @source to @clone. Also
 * removes signaled fences from @source while at it.
 */
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        struct dma_fence *f;
        int i, r;

        hash_for_each_safe(source->fences, i, tmp, e, node) {
                f = e->fence;
                if (!dma_fence_is_signaled(f)) {
                        r = amdgpu_sync_fence(clone, f, e->explicit);
                        if (r)
                                return r;
                } else {
                        hash_del(&e->node);
                        dma_fence_put(f);
                        kmem_cache_free(amdgpu_sync_slab, e);
                }
        }

        dma_fence_put(clone->last_vm_update);
        clone->last_vm_update = dma_fence_get(source->last_vm_update);

        return 0;
}
/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object to wait on
 * @intr: whether the wait is interruptible
 *
 * Wait for every fence in the sync object to signal, dropping each entry as it
 * completes.
 */
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        int i, r;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                r = dma_fence_wait(e->fence, intr);
                if (r)
                        return r;

                hash_del(&e->node);
                dma_fence_put(e->fence);
                kmem_cache_free(amdgpu_sync_slab, e);
        }

        return 0;
}
/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
        unsigned i;

        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                hash_del(&e->node);
                dma_fence_put(e->fence);
                kmem_cache_free(amdgpu_sync_slab, e);
        }

        dma_fence_put(sync->last_vm_update);
}
/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
        amdgpu_sync_slab = kmem_cache_create(
                "amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
                SLAB_HWCACHE_ALIGN, NULL);
        if (!amdgpu_sync_slab)
                return -ENOMEM;

        return 0;
}
/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
        kmem_cache_destroy(amdgpu_sync_slab);
}