/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
27 #include <linux/ktime.h>
28 #include <linux/hrtimer.h>
29 #include <linux/sched/signal.h>
30 #include <trace/events/dma_fence.h>
32 #include <nvif/cl826e.h>
33 #include <nvif/notify.h>
34 #include <nvif/event.h>
36 #include "nouveau_drv.h"
37 #include "nouveau_dma.h"
38 #include "nouveau_fence.h"
40 static const struct dma_fence_ops nouveau_fence_ops_uevent
;
41 static const struct dma_fence_ops nouveau_fence_ops_legacy
;
43 static inline struct nouveau_fence
*
44 from_fence(struct dma_fence
*fence
)
46 return container_of(fence
, struct nouveau_fence
, base
);
49 static inline struct nouveau_fence_chan
*
50 nouveau_fctx(struct nouveau_fence
*fence
)
52 return container_of(fence
->base
.lock
, struct nouveau_fence_chan
, lock
);
56 nouveau_fence_signal(struct nouveau_fence
*fence
)
60 dma_fence_signal_locked(&fence
->base
);
61 list_del(&fence
->head
);
62 rcu_assign_pointer(fence
->channel
, NULL
);
64 if (test_bit(DMA_FENCE_FLAG_USER_BITS
, &fence
->base
.flags
)) {
65 struct nouveau_fence_chan
*fctx
= nouveau_fctx(fence
);
67 if (!--fctx
->notify_ref
)
71 dma_fence_put(&fence
->base
);
75 static struct nouveau_fence
*
76 nouveau_local_fence(struct dma_fence
*fence
, struct nouveau_drm
*drm
)
78 if (fence
->ops
!= &nouveau_fence_ops_legacy
&&
79 fence
->ops
!= &nouveau_fence_ops_uevent
)
82 if (fence
->context
< drm
->chan
.context_base
||
83 fence
->context
>= drm
->chan
.context_base
+ drm
->chan
.nr
)
86 return from_fence(fence
);
90 nouveau_fence_context_kill(struct nouveau_fence_chan
*fctx
, int error
)
92 struct nouveau_fence
*fence
;
94 spin_lock_irq(&fctx
->lock
);
95 while (!list_empty(&fctx
->pending
)) {
96 fence
= list_entry(fctx
->pending
.next
, typeof(*fence
), head
);
99 dma_fence_set_error(&fence
->base
, error
);
101 if (nouveau_fence_signal(fence
))
102 nvif_notify_put(&fctx
->notify
);
104 spin_unlock_irq(&fctx
->lock
);
108 nouveau_fence_context_del(struct nouveau_fence_chan
*fctx
)
110 nouveau_fence_context_kill(fctx
, 0);
111 nvif_notify_dtor(&fctx
->notify
);
115 * Ensure that all accesses to fence->channel complete before freeing
122 nouveau_fence_context_put(struct kref
*fence_ref
)
124 kfree(container_of(fence_ref
, struct nouveau_fence_chan
, fence_ref
));
128 nouveau_fence_context_free(struct nouveau_fence_chan
*fctx
)
130 kref_put(&fctx
->fence_ref
, nouveau_fence_context_put
);
134 nouveau_fence_update(struct nouveau_channel
*chan
, struct nouveau_fence_chan
*fctx
)
136 struct nouveau_fence
*fence
;
138 u32 seq
= fctx
->read(chan
);
140 while (!list_empty(&fctx
->pending
)) {
141 fence
= list_entry(fctx
->pending
.next
, typeof(*fence
), head
);
143 if ((int)(seq
- fence
->base
.seqno
) < 0)
146 drop
|= nouveau_fence_signal(fence
);
153 nouveau_fence_wait_uevent_handler(struct nvif_notify
*notify
)
155 struct nouveau_fence_chan
*fctx
=
156 container_of(notify
, typeof(*fctx
), notify
);
158 int ret
= NVIF_NOTIFY_KEEP
;
160 spin_lock_irqsave(&fctx
->lock
, flags
);
161 if (!list_empty(&fctx
->pending
)) {
162 struct nouveau_fence
*fence
;
163 struct nouveau_channel
*chan
;
165 fence
= list_entry(fctx
->pending
.next
, typeof(*fence
), head
);
166 chan
= rcu_dereference_protected(fence
->channel
, lockdep_is_held(&fctx
->lock
));
167 if (nouveau_fence_update(chan
, fctx
))
168 ret
= NVIF_NOTIFY_DROP
;
170 spin_unlock_irqrestore(&fctx
->lock
, flags
);
176 nouveau_fence_context_new(struct nouveau_channel
*chan
, struct nouveau_fence_chan
*fctx
)
178 struct nouveau_fence_priv
*priv
= (void*)chan
->drm
->fence
;
179 struct nouveau_cli
*cli
= (void *)chan
->user
.client
;
182 INIT_LIST_HEAD(&fctx
->flip
);
183 INIT_LIST_HEAD(&fctx
->pending
);
184 spin_lock_init(&fctx
->lock
);
185 fctx
->context
= chan
->drm
->chan
.context_base
+ chan
->chid
;
187 if (chan
== chan
->drm
->cechan
)
188 strcpy(fctx
->name
, "copy engine channel");
189 else if (chan
== chan
->drm
->channel
)
190 strcpy(fctx
->name
, "generic kernel channel");
192 strcpy(fctx
->name
, nvxx_client(&cli
->base
)->name
);
194 kref_init(&fctx
->fence_ref
);
198 ret
= nvif_notify_ctor(&chan
->user
, "fenceNonStallIntr",
199 nouveau_fence_wait_uevent_handler
,
200 false, NV826E_V0_NTFY_NON_STALL_INTERRUPT
,
201 &(struct nvif_notify_uevent_req
) { },
202 sizeof(struct nvif_notify_uevent_req
),
203 sizeof(struct nvif_notify_uevent_rep
),
210 nouveau_fence_emit(struct nouveau_fence
*fence
, struct nouveau_channel
*chan
)
212 struct nouveau_fence_chan
*fctx
= chan
->fence
;
213 struct nouveau_fence_priv
*priv
= (void*)chan
->drm
->fence
;
216 fence
->channel
= chan
;
217 fence
->timeout
= jiffies
+ (15 * HZ
);
220 dma_fence_init(&fence
->base
, &nouveau_fence_ops_uevent
,
221 &fctx
->lock
, fctx
->context
, ++fctx
->sequence
);
223 dma_fence_init(&fence
->base
, &nouveau_fence_ops_legacy
,
224 &fctx
->lock
, fctx
->context
, ++fctx
->sequence
);
225 kref_get(&fctx
->fence_ref
);
227 trace_dma_fence_emit(&fence
->base
);
228 ret
= fctx
->emit(fence
);
230 dma_fence_get(&fence
->base
);
231 spin_lock_irq(&fctx
->lock
);
233 if (nouveau_fence_update(chan
, fctx
))
234 nvif_notify_put(&fctx
->notify
);
236 list_add_tail(&fence
->head
, &fctx
->pending
);
237 spin_unlock_irq(&fctx
->lock
);
244 nouveau_fence_done(struct nouveau_fence
*fence
)
246 if (fence
->base
.ops
== &nouveau_fence_ops_legacy
||
247 fence
->base
.ops
== &nouveau_fence_ops_uevent
) {
248 struct nouveau_fence_chan
*fctx
= nouveau_fctx(fence
);
249 struct nouveau_channel
*chan
;
252 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT
, &fence
->base
.flags
))
255 spin_lock_irqsave(&fctx
->lock
, flags
);
256 chan
= rcu_dereference_protected(fence
->channel
, lockdep_is_held(&fctx
->lock
));
257 if (chan
&& nouveau_fence_update(chan
, fctx
))
258 nvif_notify_put(&fctx
->notify
);
259 spin_unlock_irqrestore(&fctx
->lock
, flags
);
261 return dma_fence_is_signaled(&fence
->base
);
265 nouveau_fence_wait_legacy(struct dma_fence
*f
, bool intr
, long wait
)
267 struct nouveau_fence
*fence
= from_fence(f
);
268 unsigned long sleep_time
= NSEC_PER_MSEC
/ 1000;
269 unsigned long t
= jiffies
, timeout
= t
+ wait
;
271 while (!nouveau_fence_done(fence
)) {
276 if (wait
!= MAX_SCHEDULE_TIMEOUT
&& time_after_eq(t
, timeout
)) {
277 __set_current_state(TASK_RUNNING
);
281 __set_current_state(intr
? TASK_INTERRUPTIBLE
:
282 TASK_UNINTERRUPTIBLE
);
285 schedule_hrtimeout(&kt
, HRTIMER_MODE_REL
);
287 if (sleep_time
> NSEC_PER_MSEC
)
288 sleep_time
= NSEC_PER_MSEC
;
290 if (intr
&& signal_pending(current
))
294 __set_current_state(TASK_RUNNING
);
300 nouveau_fence_wait_busy(struct nouveau_fence
*fence
, bool intr
)
304 while (!nouveau_fence_done(fence
)) {
305 if (time_after_eq(jiffies
, fence
->timeout
)) {
310 __set_current_state(intr
?
312 TASK_UNINTERRUPTIBLE
);
314 if (intr
&& signal_pending(current
)) {
320 __set_current_state(TASK_RUNNING
);
325 nouveau_fence_wait(struct nouveau_fence
*fence
, bool lazy
, bool intr
)
330 return nouveau_fence_wait_busy(fence
, intr
);
332 ret
= dma_fence_wait_timeout(&fence
->base
, intr
, 15 * HZ
);
342 nouveau_fence_sync(struct nouveau_bo
*nvbo
, struct nouveau_channel
*chan
, bool exclusive
, bool intr
)
344 struct nouveau_fence_chan
*fctx
= chan
->fence
;
345 struct dma_fence
*fence
;
346 struct dma_resv
*resv
= nvbo
->bo
.base
.resv
;
347 struct dma_resv_list
*fobj
;
348 struct nouveau_fence
*f
;
352 ret
= dma_resv_reserve_shared(resv
, 1);
358 fobj
= dma_resv_get_list(resv
);
359 fence
= dma_resv_get_excl(resv
);
361 if (fence
&& (!exclusive
|| !fobj
|| !fobj
->shared_count
)) {
362 struct nouveau_channel
*prev
= NULL
;
363 bool must_wait
= true;
365 f
= nouveau_local_fence(fence
, chan
->drm
);
368 prev
= rcu_dereference(f
->channel
);
369 if (prev
&& (prev
== chan
|| fctx
->sync(f
, prev
, chan
) == 0))
375 ret
= dma_fence_wait(fence
, intr
);
380 if (!exclusive
|| !fobj
)
383 for (i
= 0; i
< fobj
->shared_count
&& !ret
; ++i
) {
384 struct nouveau_channel
*prev
= NULL
;
385 bool must_wait
= true;
387 fence
= rcu_dereference_protected(fobj
->shared
[i
],
388 dma_resv_held(resv
));
390 f
= nouveau_local_fence(fence
, chan
->drm
);
393 prev
= rcu_dereference(f
->channel
);
394 if (prev
&& (prev
== chan
|| fctx
->sync(f
, prev
, chan
) == 0))
400 ret
= dma_fence_wait(fence
, intr
);
407 nouveau_fence_unref(struct nouveau_fence
**pfence
)
410 dma_fence_put(&(*pfence
)->base
);
415 nouveau_fence_new(struct nouveau_channel
*chan
, bool sysmem
,
416 struct nouveau_fence
**pfence
)
418 struct nouveau_fence
*fence
;
421 if (unlikely(!chan
->fence
))
424 fence
= kzalloc(sizeof(*fence
), GFP_KERNEL
);
428 ret
= nouveau_fence_emit(fence
, chan
);
430 nouveau_fence_unref(&fence
);
/* dma_fence_ops.get_driver_name: constant driver identifier. */
static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
{
	return "nouveau";
}
441 static const char *nouveau_fence_get_timeline_name(struct dma_fence
*f
)
443 struct nouveau_fence
*fence
= from_fence(f
);
444 struct nouveau_fence_chan
*fctx
= nouveau_fctx(fence
);
446 return !fctx
->dead
? fctx
->name
: "dead channel";
450 * In an ideal world, read would not assume the channel context is still alive.
451 * This function may be called from another device, running into free memory as a
452 * result. The drm node should still be there, so we can derive the index from
455 static bool nouveau_fence_is_signaled(struct dma_fence
*f
)
457 struct nouveau_fence
*fence
= from_fence(f
);
458 struct nouveau_fence_chan
*fctx
= nouveau_fctx(fence
);
459 struct nouveau_channel
*chan
;
463 chan
= rcu_dereference(fence
->channel
);
465 ret
= (int)(fctx
->read(chan
) - fence
->base
.seqno
) >= 0;
471 static bool nouveau_fence_no_signaling(struct dma_fence
*f
)
473 struct nouveau_fence
*fence
= from_fence(f
);
476 * caller should have a reference on the fence,
477 * else fence could get freed here
479 WARN_ON(kref_read(&fence
->base
.refcount
) <= 1);
482 * This needs uevents to work correctly, but dma_fence_add_callback relies on
483 * being able to enable signaling. It will still get signaled eventually,
484 * just not right away.
486 if (nouveau_fence_is_signaled(f
)) {
487 list_del(&fence
->head
);
489 dma_fence_put(&fence
->base
);
496 static void nouveau_fence_release(struct dma_fence
*f
)
498 struct nouveau_fence
*fence
= from_fence(f
);
499 struct nouveau_fence_chan
*fctx
= nouveau_fctx(fence
);
501 kref_put(&fctx
->fence_ref
, nouveau_fence_context_put
);
502 dma_fence_free(&fence
->base
);
505 static const struct dma_fence_ops nouveau_fence_ops_legacy
= {
506 .get_driver_name
= nouveau_fence_get_get_driver_name
,
507 .get_timeline_name
= nouveau_fence_get_timeline_name
,
508 .enable_signaling
= nouveau_fence_no_signaling
,
509 .signaled
= nouveau_fence_is_signaled
,
510 .wait
= nouveau_fence_wait_legacy
,
511 .release
= nouveau_fence_release
514 static bool nouveau_fence_enable_signaling(struct dma_fence
*f
)
516 struct nouveau_fence
*fence
= from_fence(f
);
517 struct nouveau_fence_chan
*fctx
= nouveau_fctx(fence
);
520 if (!fctx
->notify_ref
++)
521 nvif_notify_get(&fctx
->notify
);
523 ret
= nouveau_fence_no_signaling(f
);
525 set_bit(DMA_FENCE_FLAG_USER_BITS
, &fence
->base
.flags
);
526 else if (!--fctx
->notify_ref
)
527 nvif_notify_put(&fctx
->notify
);
532 static const struct dma_fence_ops nouveau_fence_ops_uevent
= {
533 .get_driver_name
= nouveau_fence_get_get_driver_name
,
534 .get_timeline_name
= nouveau_fence_get_timeline_name
,
535 .enable_signaling
= nouveau_fence_enable_signaling
,
536 .signaled
= nouveau_fence_is_signaled
,
537 .release
= nouveau_fence_release