/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>

#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <trace/events/fence.h>

#include <nvif/notify.h>
#include <nvif/event.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

static const struct fence_ops nouveau_fence_ops_uevent;
static const struct fence_ops nouveau_fence_ops_legacy;

static inline struct nouveau_fence *
from_fence(struct fence *fence)
{
	return container_of(fence, struct nouveau_fence, base);
}

static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
	return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}

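/*
 * Signal a fence and drop it from the pending list.  Must be called with
 * fctx->lock held.  Returns nonzero when the caller should release the
 * channel's uevent notifier reference (notify_ref dropped to zero).
 */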
static int
nouveau_fence_signal(struct nouveau_fence *fence)
{
	int drop = 0;

	fence_signal_locked(&fence->base);
	list_del(&fence->head);
	rcu_assign_pointer(fence->channel, NULL);

	if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

		if (!--fctx->notify_ref)
			drop = 1;
	}

	fence_put(&fence->base);
	return drop;
}

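/*
 * Check whether a fence was emitted by this nouveau device; returns the
 * containing nouveau_fence if so, NULL for foreign fences.
 */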
static struct nouveau_fence *
nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm) {
	struct nouveau_fence_priv *priv = (void*)drm->fence;

	if (fence->ops != &nouveau_fence_ops_legacy &&
	    fence->ops != &nouveau_fence_ops_uevent)
		return NULL;

	if (fence->context < priv->context_base ||
	    fence->context >= priv->context_base + priv->contexts)
		return NULL;

	return from_fence(fence);
}

void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;

	spin_lock_irq(&fctx->lock);
	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if (nouveau_fence_signal(fence))
			nvif_notify_put(&fctx->notify);
	}
	spin_unlock_irq(&fctx->lock);

	nvif_notify_fini(&fctx->notify);
	fctx->dead = 1;

	/*
	 * Ensure that all accesses to fence->channel complete before freeing
	 * the channel.
	 */
	synchronize_rcu();
}

static void
nouveau_fence_context_put(struct kref *fence_ref)
{
	kfree(container_of(fence_ref, struct nouveau_fence_chan, fence_ref));
}

void
nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
{
	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}

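/*
 * Walk the pending list and signal every fence whose sequence number has
 * been reached, comparing against the last seqno read back from the
 * channel.  Called with fctx->lock held; returns nonzero if the uevent
 * notifier reference should be dropped.
 */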
static int
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;
	int drop = 0;
	u32 seq = fctx->read(chan);

	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if ((int)(seq - fence->base.seqno) < 0)
			break;

		drop |= nouveau_fence_signal(fence);
	}

	return drop;
}

static int
nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
{
	struct nouveau_fence_chan *fctx =
		container_of(notify, typeof(*fctx), notify);
	unsigned long flags;
	int ret = NVIF_NOTIFY_KEEP;

	spin_lock_irqsave(&fctx->lock, flags);
	if (!list_empty(&fctx->pending)) {
		struct nouveau_fence *fence;
		struct nouveau_channel *chan;

		fence = list_entry(fctx->pending.next, typeof(*fence), head);
		chan = rcu_dereference_protected(fence->channel,
						 lockdep_is_held(&fctx->lock));
		if (nouveau_fence_update(chan, fctx))
			ret = NVIF_NOTIFY_DROP;
	}
	spin_unlock_irqrestore(&fctx->lock, flags);

	return ret;
}

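/*
 * Initialize the per-channel fence context: pending/flip lists, the lock
 * shared with every fence on the channel, a human-readable timeline name,
 * and (when the device supports it) a uevent notifier used for
 * interrupt-driven signaling.
 */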
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
	struct nouveau_cli *cli = (void *)nvif_client(chan->object);
	int ret;

	INIT_LIST_HEAD(&fctx->flip);
	INIT_LIST_HEAD(&fctx->pending);
	spin_lock_init(&fctx->lock);
	fctx->context = priv->context_base + chan->chid;

	if (chan == chan->drm->cechan)
		strcpy(fctx->name, "copy engine channel");
	else if (chan == chan->drm->channel)
		strcpy(fctx->name, "generic kernel channel");
	else
		strcpy(fctx->name, nvxx_client(&cli->base)->name);

	kref_init(&fctx->fence_ref);
	if (!priv->uevent)
		return;

	ret = nvif_notify_init(chan->object, NULL,
			       nouveau_fence_wait_uevent_handler, false,
			       G82_CHANNEL_DMA_V0_NTFY_UEVENT,
			       &(struct nvif_notify_uevent_req) { },
			       sizeof(struct nvif_notify_uevent_req),
			       sizeof(struct nvif_notify_uevent_rep),
			       &fctx->notify);
	WARN_ON(ret);
}

struct nouveau_fence_work {
	struct work_struct work;
	struct fence_cb cb;
	void (*func)(void *);
	void *data;
};

static void
nouveau_fence_work_handler(struct work_struct *kwork)
{
	struct nouveau_fence_work *work = container_of(kwork, typeof(*work), work);
	work->func(work->data);
	kfree(work);
}

static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
{
	struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);

	schedule_work(&work->work);
}

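/*
 * Run func(data) once the fence signals, via a fence callback that kicks
 * a workqueue item.  If the fence already signaled, or callback setup
 * fails, func is called directly; on allocation failure we fall back to
 * a synchronous lazy wait first.
 */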
void
nouveau_fence_work(struct fence *fence,
		   void (*func)(void *), void *data)
{
	struct nouveau_fence_work *work;

	if (fence_is_signaled(fence))
		goto err;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		/*
		 * this might not be a nouveau fence any more,
		 * so force a lazy wait here
		 */
		WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
					   true, false));
		goto err;
	}

	INIT_WORK(&work->work, nouveau_fence_work_handler);
	work->func = func;
	work->data = data;

	if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
		goto err_work;

	return;

err_work:
	kfree(work);
err:
	func(data);
}

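/*
 * Attach a fence to a channel: initialize the base fence with the
 * channel's context and next sequence number, emit it to the hardware,
 * and queue it on the pending list.
 */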
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
	int ret;

	fence->channel = chan;
	fence->timeout = jiffies + (15 * HZ);

	if (priv->uevent)
		fence_init(&fence->base, &nouveau_fence_ops_uevent,
			   &fctx->lock, fctx->context, ++fctx->sequence);
	else
		fence_init(&fence->base, &nouveau_fence_ops_legacy,
			   &fctx->lock, fctx->context, ++fctx->sequence);
	kref_get(&fctx->fence_ref);

	trace_fence_emit(&fence->base);
	ret = fctx->emit(fence);
	if (!ret) {
		fence_get(&fence->base);
		spin_lock_irq(&fctx->lock);

		if (nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);

		list_add_tail(&fence->head, &fctx->pending);
		spin_unlock_irq(&fctx->lock);
	}

	return ret;
}

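/*
 * Check whether a fence has signaled, opportunistically processing the
 * channel's pending list first so that completed fences are retired.
 */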
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
	if (fence->base.ops == &nouveau_fence_ops_legacy ||
	    fence->base.ops == &nouveau_fence_ops_uevent) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
		struct nouveau_channel *chan;
		unsigned long flags;

		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
			return true;

		spin_lock_irqsave(&fctx->lock, flags);
		chan = rcu_dereference_protected(fence->channel,
						 lockdep_is_held(&fctx->lock));
		if (chan && nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);
		spin_unlock_irqrestore(&fctx->lock, flags);
	}
	return fence_is_signaled(&fence->base);
}

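/*
 * Poll-based wait used when uevents are unavailable: sleep on an hrtimer,
 * starting at 1us and doubling up to a 1ms cap, until the fence signals,
 * the timeout expires, or a signal is pending.
 */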
static long
nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
{
	struct nouveau_fence *fence = from_fence(f);
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	unsigned long t = jiffies, timeout = t + wait;

	while (!nouveau_fence_done(fence)) {
		ktime_t kt;

		t = jiffies;

		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);

		kt = ktime_set(0, sleep_time);
		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
		sleep_time *= 2;
		if (sleep_time > NSEC_PER_MSEC)
			sleep_time = NSEC_PER_MSEC;

		if (intr && signal_pending(current))
			return -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

	return timeout - t;
}

static int
nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
{
	int ret = 0;

	while (!nouveau_fence_done(fence)) {
		if (time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ?
				    TASK_INTERRUPTIBLE :
				    TASK_UNINTERRUPTIBLE);

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
	long ret;

	if (!lazy)
		return nouveau_fence_wait_busy(fence, intr);

	ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
	if (ret < 0)
		return ret;
	else if (!ret)
		return -EBUSY;
	else
		return 0;
}

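/*
 * Make a channel wait for the fences attached to a buffer object's
 * reservation: fences from a local channel are synced on the GPU via
 * fctx->sync() where possible, anything else is waited on with
 * fence_wait().
 */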
int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct fence *fence;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct nouveau_fence *f;
	int ret = 0, i;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(resv);

		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(resv);
	fence = reservation_object_get_excl(resv);

	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
		struct nouveau_channel *prev = NULL;
		bool must_wait = true;

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			rcu_read_lock();
			prev = rcu_dereference(f->channel);
			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
				must_wait = false;
			rcu_read_unlock();
		}

		if (must_wait)
			ret = fence_wait(fence, intr);

		return ret;
	}

	if (!exclusive || !fobj)
		return ret;

	for (i = 0; i < fobj->shared_count && !ret; ++i) {
		struct nouveau_channel *prev = NULL;
		bool must_wait = true;

		fence = rcu_dereference_protected(fobj->shared[i],
						  reservation_object_held(resv));

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			rcu_read_lock();
			prev = rcu_dereference(f->channel);
			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
				must_wait = false;
			rcu_read_unlock();
		}

		if (must_wait)
			ret = fence_wait(fence, intr);
	}

	return ret;
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
	if (*pfence)
		fence_put(&(*pfence)->base);
	*pfence = NULL;
}

int
nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
		  struct nouveau_fence **pfence)
{
	struct nouveau_fence *fence;
	int ret = 0;

	if (unlikely(!chan->fence))
		return -ENODEV;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	fence->sysmem = sysmem;

	ret = nouveau_fence_emit(fence, chan);
	if (ret)
		nouveau_fence_unref(&fence);

	*pfence = fence;
	return ret;
}

static const char *nouveau_fence_get_get_driver_name(struct fence *fence)
{
	return "nouveau";
}

static const char *nouveau_fence_get_timeline_name(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	return !fctx->dead ? fctx->name : "dead channel";
}

/*
 * In an ideal world, read would not assume the channel context is still alive.
 * This function may be called from another device, running into free memory as a
 * result. The drm node should still be there, so we can derive the index from
 * the fence context.
 */
static bool nouveau_fence_is_signaled(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	struct nouveau_channel *chan;
	bool ret = false;

	rcu_read_lock();
	chan = rcu_dereference(fence->channel);
	if (chan)
		ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
	rcu_read_unlock();

	return ret;
}

static bool nouveau_fence_no_signaling(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);

	/*
	 * caller should have a reference on the fence,
	 * else fence could get freed here
	 */
	WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);

	/*
	 * This needs uevents to work correctly, but fence_add_callback relies on
	 * being able to enable signaling. It will still get signaled eventually,
	 * just not right away.
	 */
	if (nouveau_fence_is_signaled(f)) {
		list_del(&fence->head);
		fence_put(&fence->base);
		return false;
	}

	return true;
}

static void nouveau_fence_release(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
	fence_free(&fence->base);
}

static const struct fence_ops nouveau_fence_ops_legacy = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_no_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = nouveau_fence_wait_legacy,
	.release = nouveau_fence_release
};

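/*
 * Take a reference on the channel's uevent notifier so the interrupt
 * handler will retire pending fences; the reference is dropped again if
 * the fence turned out to be signaled already.
 */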
static bool nouveau_fence_enable_signaling(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	bool ret;

	if (!fctx->notify_ref++)
		nvif_notify_get(&fctx->notify);

	ret = nouveau_fence_no_signaling(f);
	if (ret)
		set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
	else if (!--fctx->notify_ref)
		nvif_notify_put(&fctx->notify);

	return ret;
}

static const struct fence_ops nouveau_fence_ops_uevent = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_enable_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = fence_default_wait,
	.release = nouveau_fence_release
};