/* drivers/gpu/drm/nouveau/nouveau_fence.c */
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <trace/events/dma_fence.h>

#include <nvif/cl826e.h>
#include <nvif/notify.h>
#include <nvif/event.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

static const struct dma_fence_ops nouveau_fence_ops_uevent;
static const struct dma_fence_ops nouveau_fence_ops_legacy;

static inline struct nouveau_fence *
from_fence(struct dma_fence *fence)
{
	return container_of(fence, struct nouveau_fence, base);
}

static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
	return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}

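/*
 * Signal the fence and remove it from the pending list.
 *
 * Must be called with fctx->lock held. Returns nonzero when this was the
 * last fence with signaling enabled, i.e. the caller may drop its
 * reference on the nvif notifier.
 */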
static int
nouveau_fence_signal(struct nouveau_fence *fence)
{
	int drop = 0;

	dma_fence_signal_locked(&fence->base);
	list_del(&fence->head);
	rcu_assign_pointer(fence->channel, NULL);

	if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

		if (!--fctx->notify_ref)
			drop = 1;
	}

	dma_fence_put(&fence->base);
	return drop;
}

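/*
 * Map a dma_fence back to a nouveau_fence, but only if it was emitted by
 * one of this device's channels; fences from other drivers (or other
 * nouveau instances) yield NULL.
 */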
static struct nouveau_fence *
nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
{
	if (fence->ops != &nouveau_fence_ops_legacy &&
	    fence->ops != &nouveau_fence_ops_uevent)
		return NULL;

	if (fence->context < drm->chan.context_base ||
	    fence->context >= drm->chan.context_base + drm->chan.nr)
		return NULL;

	return from_fence(fence);
}

void
nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
{
	struct nouveau_fence *fence;

	spin_lock_irq(&fctx->lock);
	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if (error)
			dma_fence_set_error(&fence->base, error);

		if (nouveau_fence_signal(fence))
			nvif_notify_put(&fctx->notify);
	}
	spin_unlock_irq(&fctx->lock);
}

void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
	nouveau_fence_context_kill(fctx, 0);
	nvif_notify_fini(&fctx->notify);
	fctx->dead = 1;

	/*
	 * Ensure that all accesses to fence->channel complete before freeing
	 * the channel.
	 */
	synchronize_rcu();
}

static void
nouveau_fence_context_put(struct kref *fence_ref)
{
	kfree(container_of(fence_ref, struct nouveau_fence_chan, fence_ref));
}

void
nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
{
	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}

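/*
 * Signal every pending fence whose sequence number the channel has reached.
 * The (int)(seq - seqno) comparison is wraparound-safe. Called with
 * fctx->lock held; returns nonzero if the uevent notifier can be dropped.
 */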
static int
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;
	int drop = 0;
	u32 seq = fctx->read(chan);

	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if ((int)(seq - fence->base.seqno) < 0)
			break;

		drop |= nouveau_fence_signal(fence);
	}

	return drop;
}

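/*
 * Non-stall interrupt handler: signal completed fences and ask NVIF to
 * drop the notifier once nothing is left pending.
 */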
static int
nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
{
	struct nouveau_fence_chan *fctx =
		container_of(notify, typeof(*fctx), notify);
	unsigned long flags;
	int ret = NVIF_NOTIFY_KEEP;

	spin_lock_irqsave(&fctx->lock, flags);
	if (!list_empty(&fctx->pending)) {
		struct nouveau_fence *fence;
		struct nouveau_channel *chan;

		fence = list_entry(fctx->pending.next, typeof(*fence), head);
		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
		if (nouveau_fence_update(chan, fctx))
			ret = NVIF_NOTIFY_DROP;
	}
	spin_unlock_irqrestore(&fctx->lock, flags);

	return ret;
}

void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
	struct nouveau_cli *cli = (void *)chan->user.client;
	int ret;

	INIT_LIST_HEAD(&fctx->flip);
	INIT_LIST_HEAD(&fctx->pending);
	spin_lock_init(&fctx->lock);
	fctx->context = chan->drm->chan.context_base + chan->chid;

	if (chan == chan->drm->cechan)
		strcpy(fctx->name, "copy engine channel");
	else if (chan == chan->drm->channel)
		strcpy(fctx->name, "generic kernel channel");
	else
		strcpy(fctx->name, nvxx_client(&cli->base)->name);

	kref_init(&fctx->fence_ref);
	if (!priv->uevent)
		return;

	ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
			       false, NV826E_V0_NTFY_NON_STALL_INTERRUPT,
			       &(struct nvif_notify_uevent_req) { },
			       sizeof(struct nvif_notify_uevent_req),
			       sizeof(struct nvif_notify_uevent_rep),
			       &fctx->notify);

	WARN_ON(ret);
}

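/*
 * Emit the fence on its channel's ring and queue it on the pending list.
 * A reference is taken for the pending list; nouveau_fence_signal() drops
 * it again.
 */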
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
	int ret;

	fence->channel = chan;
	fence->timeout = jiffies + (15 * HZ);

	if (priv->uevent)
		dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
			       &fctx->lock, fctx->context, ++fctx->sequence);
	else
		dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
			       &fctx->lock, fctx->context, ++fctx->sequence);
	kref_get(&fctx->fence_ref);

	trace_dma_fence_emit(&fence->base);
	ret = fctx->emit(fence);
	if (!ret) {
		dma_fence_get(&fence->base);
		spin_lock_irq(&fctx->lock);

		if (nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);

		list_add_tail(&fence->head, &fctx->pending);
		spin_unlock_irq(&fctx->lock);
	}

	return ret;
}

bool
nouveau_fence_done(struct nouveau_fence *fence)
{
	if (fence->base.ops == &nouveau_fence_ops_legacy ||
	    fence->base.ops == &nouveau_fence_ops_uevent) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
		struct nouveau_channel *chan;
		unsigned long flags;

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
			return true;

		spin_lock_irqsave(&fctx->lock, flags);
		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
		if (chan && nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);
		spin_unlock_irqrestore(&fctx->lock, flags);
	}
	return dma_fence_is_signaled(&fence->base);
}

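/*
 * Poll for completion with exponential backoff: start by sleeping 1 us
 * (NSEC_PER_MSEC / 1000) and double every iteration, capped at 1 ms.
 */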
static long
nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
	struct nouveau_fence *fence = from_fence(f);
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	unsigned long t = jiffies, timeout = t + wait;

	while (!nouveau_fence_done(fence)) {
		ktime_t kt;

		t = jiffies;

		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);

		kt = sleep_time;
		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
		sleep_time *= 2;
		if (sleep_time > NSEC_PER_MSEC)
			sleep_time = NSEC_PER_MSEC;

		if (intr && signal_pending(current))
			return -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

	return timeout - t;
}

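/*
 * Busy-wait until the fence signals, the fence's 15 second timeout
 * expires (-EBUSY), or a signal is pending (-ERESTARTSYS).
 */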
static int
nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
{
	int ret = 0;

	while (!nouveau_fence_done(fence)) {
		if (time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ?
				    TASK_INTERRUPTIBLE :
				    TASK_UNINTERRUPTIBLE);

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
	long ret;

	if (!lazy)
		return nouveau_fence_wait_busy(fence, intr);

	ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
	if (ret < 0)
		return ret;
	else if (!ret)
		return -EBUSY;
	else
		return 0;
}

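/*
 * Make the channel's work wait for the fences on nvbo's reservation
 * object. Fences from another channel of this device are synchronized on
 * the GPU via fctx->sync(); anything else falls back to a CPU wait.
 */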
int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct dma_fence *fence;
	struct dma_resv *resv = nvbo->bo.base.resv;
	struct dma_resv_list *fobj;
	struct nouveau_fence *f;
	int ret = 0, i;

	if (!exclusive) {
		ret = dma_resv_reserve_shared(resv, 1);

		if (ret)
			return ret;
	}

	fobj = dma_resv_get_list(resv);
	fence = dma_resv_get_excl(resv);

	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
		struct nouveau_channel *prev = NULL;
		bool must_wait = true;

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			rcu_read_lock();
			prev = rcu_dereference(f->channel);
			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
				must_wait = false;
			rcu_read_unlock();
		}

		if (must_wait)
			ret = dma_fence_wait(fence, intr);

		return ret;
	}

	if (!exclusive || !fobj)
		return ret;

	for (i = 0; i < fobj->shared_count && !ret; ++i) {
		struct nouveau_channel *prev = NULL;
		bool must_wait = true;

		fence = rcu_dereference_protected(fobj->shared[i],
						  dma_resv_held(resv));

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			rcu_read_lock();
			prev = rcu_dereference(f->channel);
			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
				must_wait = false;
			rcu_read_unlock();
		}

		if (must_wait)
			ret = dma_fence_wait(fence, intr);
	}

	return ret;
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
	if (*pfence)
		dma_fence_put(&(*pfence)->base);
	*pfence = NULL;
}

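/*
 * Allocate and emit a new fence on the given channel. On failure the
 * fence is freed and *pfence is left NULL.
 */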
int
nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
		  struct nouveau_fence **pfence)
{
	struct nouveau_fence *fence;
	int ret = 0;

	if (unlikely(!chan->fence))
		return -ENODEV;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	ret = nouveau_fence_emit(fence, chan);
	if (ret)
		nouveau_fence_unref(&fence);

	*pfence = fence;
	return ret;
}

static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
{
	return "nouveau";
}

static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	return !fctx->dead ? fctx->name : "dead channel";
}

/*
 * In an ideal world, read would not assume the channel context is still alive.
 * This function may be called from another device, running into free memory as a
 * result. The drm node should still be there, so we can derive the index from
 * the fence context.
 */
static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	struct nouveau_channel *chan;
	bool ret = false;

	rcu_read_lock();
	chan = rcu_dereference(fence->channel);
	if (chan)
		ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
	rcu_read_unlock();

	return ret;
}

static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);

	/*
	 * caller should have a reference on the fence,
	 * else fence could get freed here
	 */
	WARN_ON(kref_read(&fence->base.refcount) <= 1);

	/*
	 * This needs uevents to work correctly, but dma_fence_add_callback relies on
	 * being able to enable signaling. It will still get signaled eventually,
	 * just not right away.
	 */
	if (nouveau_fence_is_signaled(f)) {
		list_del(&fence->head);

		dma_fence_put(&fence->base);
		return false;
	}

	return true;
}

static void nouveau_fence_release(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
	dma_fence_free(&fence->base);
}

static const struct dma_fence_ops nouveau_fence_ops_legacy = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_no_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = nouveau_fence_wait_legacy,
	.release = nouveau_fence_release
};

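/*
 * The first fence that enables signaling takes a reference on the
 * non-stall notifier; nouveau_fence_signal() drops it again when the last
 * such fence is signalled.
 */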
static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	bool ret;

	if (!fctx->notify_ref++)
		nvif_notify_get(&fctx->notify);

	ret = nouveau_fence_no_signaling(f);
	if (ret)
		set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
	else if (!--fctx->notify_ref)
		nvif_notify_put(&fctx->notify);

	return ret;
}

static const struct dma_fence_ops nouveau_fence_ops_uevent = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_enable_signaling,
	.signaled = nouveau_fence_is_signaled,
	.release = nouveau_fence_release
};