/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>

#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <trace/events/fence.h>

#include <nvif/notify.h>
#include <nvif/event.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

static const struct fence_ops nouveau_fence_ops_uevent;
static const struct fence_ops nouveau_fence_ops_legacy;

static inline struct nouveau_fence *
from_fence(struct fence *fence)
{
	return container_of(fence, struct nouveau_fence, base);
}

static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
	return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}

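/*
 * Signal a fence and unlink it from its channel's pending list. Must be
 * called with fctx->lock held. Returns nonzero when the caller should drop
 * the uevent notifier reference, i.e. when the last signaling-enabled fence
 * on this context just went away.
 */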
static int
nouveau_fence_signal(struct nouveau_fence *fence)
{
	int drop = 0;

	fence_signal_locked(&fence->base);
	list_del(&fence->head);
	rcu_assign_pointer(fence->channel, NULL);

	if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

		if (!--fctx->notify_ref)
			drop = 1;
	}

	fence_put(&fence->base);
	return drop;
}

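/*
 * If @fence was emitted by this driver instance, return the containing
 * nouveau_fence; foreign fences (other drivers, or another nouveau device's
 * fence context range) yield NULL.
 */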
static struct nouveau_fence *
nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm)
{
	struct nouveau_fence_priv *priv = (void *)drm->fence;

	if (fence->ops != &nouveau_fence_ops_legacy &&
	    fence->ops != &nouveau_fence_ops_uevent)
		return NULL;

	if (fence->context < priv->context_base ||
	    fence->context >= priv->context_base + priv->contexts)
		return NULL;

	return from_fence(fence);
}

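/*
 * Tear down a channel's fence context: force-signal everything still
 * pending, disable the uevent notifier, and wait out RCU readers of
 * fence->channel before the channel itself goes away.
 */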
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;

	spin_lock_irq(&fctx->lock);
	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if (nouveau_fence_signal(fence))
			nvif_notify_put(&fctx->notify);
	}
	spin_unlock_irq(&fctx->lock);

	nvif_notify_fini(&fctx->notify);
	fctx->dead = 1;

	/*
	 * Ensure that all accesses to fence->channel complete before freeing
	 * the channel.
	 */
	synchronize_rcu();
}

static void
nouveau_fence_context_put(struct kref *fence_ref)
{
	kfree(container_of(fence_ref, struct nouveau_fence_chan, fence_ref));
}

void
nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
{
	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}

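/*
 * Walk the pending list and signal every fence whose sequence number the
 * channel has reached. Caller holds fctx->lock. The return value is nonzero
 * when the uevent notifier can be turned off again.
 */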
static int
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;
	int drop = 0;
	u32 seq = fctx->read(chan);

	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if ((int)(seq - fence->base.seqno) < 0)
			break;

		drop |= nouveau_fence_signal(fence);
	}

	return drop;
}

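/*
 * Uevent handler, called on channel completion interrupts: signal whatever
 * has finished, and ask NVIF to drop the notifier once no pending fence
 * needs it any more.
 */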
static int
nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
{
	struct nouveau_fence_chan *fctx =
		container_of(notify, typeof(*fctx), notify);
	unsigned long flags;
	int ret = NVIF_NOTIFY_KEEP;

	spin_lock_irqsave(&fctx->lock, flags);
	if (!list_empty(&fctx->pending)) {
		struct nouveau_fence *fence;
		struct nouveau_channel *chan;

		fence = list_entry(fctx->pending.next, typeof(*fence), head);
		chan = rcu_dereference_protected(fence->channel,
						 lockdep_is_held(&fctx->lock));
		if (nouveau_fence_update(chan, fctx))
			ret = NVIF_NOTIFY_DROP;
	}
	spin_unlock_irqrestore(&fctx->lock, flags);

	return ret;
}

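/*
 * Initialize the per-channel fence context and, when the backend supports
 * uevents, hook up the completion notifier. The notifier starts disarmed;
 * it is armed on demand via nvif_notify_get() when fence signaling is
 * first enabled.
 */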
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	struct nouveau_cli *cli = (void *)chan->user.client;
	int ret;

	INIT_LIST_HEAD(&fctx->flip);
	INIT_LIST_HEAD(&fctx->pending);
	spin_lock_init(&fctx->lock);
	fctx->context = priv->context_base + chan->chid;

	if (chan == chan->drm->cechan)
		strcpy(fctx->name, "copy engine channel");
	else if (chan == chan->drm->channel)
		strcpy(fctx->name, "generic kernel channel");
	else
		strcpy(fctx->name, nvxx_client(&cli->base)->name);

	kref_init(&fctx->fence_ref);
	if (!priv->uevent)
		return;

	ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
			       false, G82_CHANNEL_DMA_V0_NTFY_UEVENT,
			       &(struct nvif_notify_uevent_req) { },
			       sizeof(struct nvif_notify_uevent_req),
			       sizeof(struct nvif_notify_uevent_rep),
			       &fctx->notify);

	WARN_ON(ret);
}

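/*
 * Deferred work that runs a callback once a fence signals, so the callback
 * executes in process context rather than in the fence's (possibly atomic)
 * signaling path.
 */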
struct nouveau_fence_work {
	struct work_struct work;
	struct fence_cb cb;
	void (*func)(void *);
	void *data;
};

static void
nouveau_fence_work_handler(struct work_struct *kwork)
{
	struct nouveau_fence_work *work = container_of(kwork, typeof(*work), work);

	work->func(work->data);
	kfree(work);
}

static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
{
	struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);

	schedule_work(&work->work);
}

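/*
 * Schedule func(data) for execution once @fence signals. On every failure
 * path (fence already signaled, allocation failure, callback installation
 * failure) the callback is invoked synchronously instead, after waiting the
 * fence out if necessary.
 */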
void
nouveau_fence_work(struct fence *fence,
		   void (*func)(void *), void *data)
{
	struct nouveau_fence_work *work;

	if (fence_is_signaled(fence))
		goto err;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		/*
		 * this might not be a nouveau fence any more,
		 * so force a lazy wait here
		 */
		WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
					   true, false));
		goto err;
	}

	INIT_WORK(&work->work, nouveau_fence_work_handler);
	work->func = func;
	work->data = data;

	if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
		goto err_work;

	return;

err_work:
	kfree(work);
err:
	func(data);
}

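/*
 * Emit the fence onto its channel and queue it on the pending list. The
 * fence ops are chosen at emit time: uevent-capable backends get
 * interrupt-driven signaling, everything else gets the polling legacy ops.
 */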
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	int ret;

	fence->channel = chan;
	fence->timeout = jiffies + (15 * HZ);

	if (priv->uevent)
		fence_init(&fence->base, &nouveau_fence_ops_uevent,
			   &fctx->lock, fctx->context, ++fctx->sequence);
	else
		fence_init(&fence->base, &nouveau_fence_ops_legacy,
			   &fctx->lock, fctx->context, ++fctx->sequence);
	kref_get(&fctx->fence_ref);

	trace_fence_emit(&fence->base);
	ret = fctx->emit(fence);
	if (!ret) {
		fence_get(&fence->base);
		spin_lock_irq(&fctx->lock);

		if (nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);

		list_add_tail(&fence->head, &fctx->pending);
		spin_unlock_irq(&fctx->lock);
	}

	return ret;
}

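/*
 * Poll for completion: flush the channel's pending list first, so the base
 * fence machinery sees up-to-date signaled state.
 */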
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
	if (fence->base.ops == &nouveau_fence_ops_legacy ||
	    fence->base.ops == &nouveau_fence_ops_uevent) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
		struct nouveau_channel *chan;
		unsigned long flags;

		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
			return true;

		spin_lock_irqsave(&fctx->lock, flags);
		chan = rcu_dereference_protected(fence->channel,
						 lockdep_is_held(&fctx->lock));
		if (chan && nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);
		spin_unlock_irqrestore(&fctx->lock, flags);
	}
	return fence_is_signaled(&fence->base);
}

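/*
 * Legacy wait: poll with an hrtimer sleep that backs off exponentially from
 * 1 us up to a 1 ms cap, since legacy contexts have no completion interrupt
 * to wake us.
 */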
static long
nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
{
	struct nouveau_fence *fence = from_fence(f);
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	unsigned long t = jiffies, timeout = t + wait;

	while (!nouveau_fence_done(fence)) {
		ktime_t kt;

		t = jiffies;

		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);

		kt = ktime_set(0, sleep_time);
		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
		sleep_time *= 2;
		if (sleep_time > NSEC_PER_MSEC)
			sleep_time = NSEC_PER_MSEC;

		if (intr && signal_pending(current))
			return -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

	return timeout - t;
}

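/*
 * Spin until the fence completes, its 15 second timeout expires (-EBUSY),
 * or, when interruptible, a signal arrives (-ERESTARTSYS).
 */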
static int
nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
{
	int ret = 0;

	while (!nouveau_fence_done(fence)) {
		if (time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ?
				    TASK_INTERRUPTIBLE :
				    TASK_UNINTERRUPTIBLE);

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
	long ret;

	if (!lazy)
		return nouveau_fence_wait_busy(fence, intr);

	ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
	if (ret < 0)
		return ret;
	else if (!ret)
		return -EBUSY;
	else
		return 0;
}

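/*
 * Make @chan wait for the fences attached to @nvbo's reservation object.
 * Waits on fences from other nouveau channels go through fctx->sync(), a
 * channel-to-channel sync that avoids stalling the CPU where the backend
 * allows; anything else falls back to fence_wait().
 */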
int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct fence *fence;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct nouveau_fence *f;
	int ret = 0, i;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(resv);

		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(resv);
	fence = reservation_object_get_excl(resv);

	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
		struct nouveau_channel *prev = NULL;
		bool must_wait = true;

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			rcu_read_lock();
			prev = rcu_dereference(f->channel);
			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
				must_wait = false;
			rcu_read_unlock();
		}

		if (must_wait)
			ret = fence_wait(fence, intr);

		return ret;
	}

	if (!exclusive || !fobj)
		return ret;

	for (i = 0; i < fobj->shared_count && !ret; ++i) {
		struct nouveau_channel *prev = NULL;
		bool must_wait = true;

		fence = rcu_dereference_protected(fobj->shared[i],
						  reservation_object_held(resv));

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			rcu_read_lock();
			prev = rcu_dereference(f->channel);
			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
				must_wait = false;
			rcu_read_unlock();
		}

		if (must_wait)
			ret = fence_wait(fence, intr);
	}

	return ret;
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
	if (*pfence)
		fence_put(&(*pfence)->base);
	*pfence = NULL;
}

int
nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
		  struct nouveau_fence **pfence)
{
	struct nouveau_fence *fence;
	int ret = 0;

	if (unlikely(!chan->fence))
		return -ENODEV;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	fence->sysmem = sysmem;

	ret = nouveau_fence_emit(fence, chan);
	if (ret)
		nouveau_fence_unref(&fence);

	*pfence = fence;
	return ret;
}

static const char *nouveau_fence_get_get_driver_name(struct fence *fence)
{
	return "nouveau";
}

static const char *nouveau_fence_get_timeline_name(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	return !fctx->dead ? fctx->name : "dead channel";
}

/*
 * In an ideal world, read would not assume the channel context is still
 * alive. This function may be called from another device, running into
 * free memory as a result. The drm node should still be there, so we can
 * derive the index from the fence context.
 */
static bool nouveau_fence_is_signaled(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	struct nouveau_channel *chan;
	bool ret = false;

	rcu_read_lock();
	chan = rcu_dereference(fence->channel);
	if (chan)
		ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
	rcu_read_unlock();

	return ret;
}

static bool nouveau_fence_no_signaling(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);

	/*
	 * caller should have a reference on the fence,
	 * else fence could get freed here
	 */
	WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);

	/*
	 * This needs uevents to work correctly, but fence_add_callback relies
	 * on being able to enable signaling. It will still get signaled
	 * eventually, just not right away.
	 */
	if (nouveau_fence_is_signaled(f)) {
		list_del(&fence->head);
		fence_put(&fence->base);

		return false;
	}

	return true;
}

static void nouveau_fence_release(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
	fence_free(&fence->base);
}

static const struct fence_ops nouveau_fence_ops_legacy = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_no_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = nouveau_fence_wait_legacy,
	.release = nouveau_fence_release
};

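/*
 * Arm the uevent notifier the first time signaling is requested, and keep
 * a count so it can be disarmed again once no enabled fence needs it.
 */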
static bool nouveau_fence_enable_signaling(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	bool ret;

	if (!fctx->notify_ref++)
		nvif_notify_get(&fctx->notify);

	ret = nouveau_fence_no_signaling(f);
	if (ret)
		set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
	else if (!--fctx->notify_ref)
		nvif_notify_put(&fctx->notify);

	return ret;
}

static const struct fence_ops nouveau_fence_ops_uevent = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_enable_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = fence_default_wait,
	.release = NULL
};