/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "i915_drv.h"
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	/* The timeline struct (as part of the ppgtt underneath a context)
	 * may be freed when the request is no longer in use by the GPU.
	 * We could extend the life of a context to beyond that of all
	 * fences, possibly keeping the hw resource around indefinitely,
	 * or we just give them a false name. Since
	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
	 * lie seems justifiable.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return "signaled";

	return to_request(fence)->timeline->common->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_gem_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	if (i915_fence_signaled(fence))
		return false;

	intel_engine_enable_signaling(to_request(fence), true);
	return !i915_fence_signaled(fence);
}
static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_wait_request(to_request(fence), interruptible, timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
	struct drm_i915_gem_request *req = to_request(fence);

	/* The request is put onto a RCU freelist (i.e. the address
	 * is immediately reused), mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	 */
	i915_sw_fence_fini(&req->submit);

	kmem_cache_free(req->i915->requests, req);
}
const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};
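/*
 * Requests are tracked on a per-client list via request->client_link so the
 * client that submitted them can be identified. Drop the request from that
 * list, rechecking request->file_priv under file_priv->mm.lock since it may
 * be cleared concurrently (presumably by the file-close path).
 */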
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv;

	file_priv = request->file_priv;
	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_link);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}
static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct drm_i915_private *i915,
		     struct i915_dependency *dep)
{
	kmem_cache_free(i915->dependencies, dep);
}
static void
__i915_priotree_add_dependency(struct i915_priotree *pt,
			       struct i915_priotree *signal,
			       struct i915_dependency *dep,
			       unsigned long flags)
{
	INIT_LIST_HEAD(&dep->dfs_link);
	list_add(&dep->wait_link, &signal->waiters_list);
	list_add(&dep->signal_link, &pt->signalers_list);
	dep->signaler = signal;
	dep->flags = flags;
}

static int
i915_priotree_add_dependency(struct drm_i915_private *i915,
			     struct i915_priotree *pt,
			     struct i915_priotree *signal)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc(i915);
	if (!dep)
		return -ENOMEM;

	__i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
	return 0;
}
static void
i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
{
	struct i915_dependency *dep, *next;

	GEM_BUG_ON(!list_empty(&pt->link));

	/* Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
		list_del(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
		list_del(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}
}

static void
i915_priotree_init(struct i915_priotree *pt)
{
	INIT_LIST_HEAD(&pt->signalers_list);
	INIT_LIST_HEAD(&pt->waiters_list);
	INIT_LIST_HEAD(&pt->link);
	pt->priority = I915_PRIORITY_INVALID;
}
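/*
 * Rewind every engine timeline to @seqno. The device must first be idled so
 * that no breadcrumb is written with a stale seqno after the tracking has
 * been reset, and the per-timeline semaphore sync state is cleared since the
 * recorded global seqno values are no longer meaningful.
 */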
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	ret = i915_gem_wait_for_idle(i915,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		return ret;

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	for_each_engine(engine, i915, id) {
		struct i915_gem_timeline *timeline;
		struct intel_timeline *tl = engine->timeline;

		if (!i915_seqno_passed(seqno, tl->seqno)) {
			/* spin until threads are complete */
			while (intel_breadcrumbs_busy(engine))
				cond_resched();
		}

		/* Check we are idle before we fiddle with hw state! */
		GEM_BUG_ON(!intel_engine_is_idle(engine));
		GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));

		/* Finally reset hw state */
		intel_engine_init_global_seqno(engine, seqno);
		tl->seqno = seqno;

		list_for_each_entry(timeline, &i915->gt.timelines, link)
			memset(timeline->engine[id].global_sync, 0,
			       sizeof(timeline->engine[id].global_sync));
	}

	return 0;
}
int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we
	 * will inject to ring
	 */
	return reset_all_global_seqno(dev_priv, seqno - 1);
}
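/*
 * Called when the first request is queued against an otherwise idle device:
 * take the runtime-pm and GT IRQ power references, re-enable powersave and
 * hangcheck, and kick the retire worker so that completed requests are
 * reaped later.
 */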
static void mark_busy(struct drm_i915_private *i915)
{
	if (i915->gt.awake)
		return;

	GEM_BUG_ON(!i915->gt.active_requests);

	intel_runtime_pm_get_noresume(i915);

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has negative impact on the performance of the chip with
	 * huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);

	i915->gt.awake = true;

	intel_enable_gt_powersave(i915);
	i915_update_gfx_val(i915);
	if (INTEL_GEN(i915) >= 6)
		gen6_rps_busy(i915);
	i915_pmu_gt_unparked(i915);

	intel_engines_unpark(i915);

	i915_queue_hangcheck(i915);

	queue_delayed_work(i915->wq,
			   &i915->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}
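/*
 * Reserve a seqno for a new request. inflight_seqnos counts how many seqnos
 * are still in flight on this engine; if adding that count to the current
 * timeline seqno would overflow a u32, idle the GPU and reset the global
 * seqno space before continuing. The first active request also marks the
 * device busy (see mark_busy()).
 */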
static int reserve_engine(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	u32 active = ++engine->timeline->inflight_seqnos;
	u32 seqno = engine->timeline->seqno;
	int ret;

	/* Reservation is fine until we need to wrap around */
	if (unlikely(add_overflows(seqno, active))) {
		ret = reset_all_global_seqno(i915, 0);
		if (ret) {
			engine->timeline->inflight_seqnos--;
			return ret;
		}
	}

	if (!i915->gt.active_requests++)
		mark_busy(i915);

	return 0;
}

static void unreserve_engine(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (!--i915->gt.active_requests) {
		/* Cancel the mark_busy() from our reserve_engine() */
		GEM_BUG_ON(!i915->gt.awake);
		mod_delayed_work(i915->wq,
				 &i915->gt.idle_work,
				 msecs_to_jiffies(100));
	}

	GEM_BUG_ON(!engine->timeline->inflight_seqnos);
	engine->timeline->inflight_seqnos--;
}
void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct drm_i915_gem_request *request)
{
	/* Space left intentionally blank */
}
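/*
 * On retirement we know the GPU has finished reading the request, so the
 * ring space it occupied (up to request->postfix, or the final tail for the
 * last request on the ring) can be recycled by moving ring->head forwards.
 */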
static void advance_ring(struct drm_i915_gem_request *request)
{
	unsigned int tail;

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	if (list_is_last(&request->ring_link, &request->ring->request_list)) {
		/* We may race here with execlists resubmitting this request
		 * as we retire it. The resubmission will move the ring->tail
		 * forwards (to request->wa_tail). We either read the
		 * current value that was written to hw, or the value that
		 * is just about to be. Either works, if we miss the last two
		 * noops - they are safe to be replayed on a reset.
		 */
		tail = READ_ONCE(request->ring->tail);
	} else {
		tail = request->postfix;
	}
	list_del(&request->ring_link);

	request->ring->head = tail;
}

static void free_capture_list(struct drm_i915_gem_request *request)
{
	struct i915_gem_capture_list *capture;

	capture = request->capture_list;
	while (capture) {
		struct i915_gem_capture_list *next = capture->next;

		kfree(capture);
		capture = next;
	}
}
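/*
 * Retire a completed request: unlink it from the engine timeline and ring,
 * run the i915_gem_active retirement callbacks, release the context pin
 * deferred from submission, signal the fence and drop the tracking
 * reference.
 */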
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct i915_gem_active *active, *next;

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
	GEM_BUG_ON(!i915_gem_request_completed(request));
	GEM_BUG_ON(!request->i915->gt.active_requests);

	trace_i915_gem_request_retire(request);

	spin_lock_irq(&engine->timeline->lock);
	list_del_init(&request->link);
	spin_unlock_irq(&engine->timeline->lock);

	unreserve_engine(request->engine);
	advance_ring(request);

	free_capture_list(request);

	/* Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/* In microbenchmarks or focusing upon time inside the kernel,
		 * we may spend an inordinate amount of time simply handling
		 * the retirement of requests and processing their callbacks.
		 * Of which, this loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_gem_request_remove_from_client(request);

	/* Retirement decays the ban score as it is a sign of ctx progress */
	atomic_dec_if_positive(&request->ctx->ban_score);

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. However, since we
	 * cannot take the required locks at i915_gem_request_submit() we
	 * defer the unpinning of the active context to now, retirement of
	 * the subsequent request.
	 */
	if (engine->last_retired_context)
		engine->context_unpin(engine, engine->last_retired_context);
	engine->last_retired_context = request->ctx;

	spin_lock_irq(&request->lock);
	if (request->waitboost)
		atomic_dec(&request->i915->gt_pm.rps.num_waiters);
	dma_fence_signal_locked(&request->fence);
	spin_unlock_irq(&request->lock);

	i915_priotree_fini(request->i915, &request->priotree);
	i915_gem_request_put(request);
}
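/*
 * Retire every request on the engine timeline up to and including @req.
 * The caller must already know that @req is completed, which implies that
 * every earlier request on the same timeline has completed as well.
 */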
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_gem_request_completed(req));

	if (list_empty(&req->link))
		return;

	do {
		tmp = list_first_entry(&engine->timeline->requests,
				       typeof(*tmp), link);

		i915_gem_request_retire(tmp);
	} while (tmp != req);
}

static u32 timeline_get_seqno(struct intel_timeline *tl)
{
	return ++tl->seqno;
}
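/*
 * Move the request from its per-context timeline onto the engine's global
 * timeline, assigning it the next global seqno and emitting the breadcrumb
 * that the GPU writes on completion. Called with the engine timeline lock
 * held and irqs disabled; may be re-entered from another i915 fence's signal
 * callback, hence the nested locking of request->lock.
 */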
void __i915_gem_request_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_timeline *timeline;
	u32 seqno;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline->lock);

	/* Transfer from per-context onto the global per-engine timeline */
	timeline = engine->timeline;
	GEM_BUG_ON(timeline == request->timeline);
	GEM_BUG_ON(request->global_seqno);

	seqno = timeline_get_seqno(timeline);
	GEM_BUG_ON(!seqno);
	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = seqno;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_enable_signaling(request, false);
	spin_unlock(&request->lock);

	engine->emit_breadcrumb(request,
				request->ring->vaddr + request->postfix);

	spin_lock(&request->timeline->lock);
	list_move_tail(&request->link, &timeline->requests);
	spin_unlock(&request->timeline->lock);

	trace_i915_gem_request_execute(request);

	wake_up_all(&request->execute);
}

void i915_gem_request_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline->lock, flags);

	__i915_gem_request_submit(request);

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_timeline *timeline;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline->lock);

	/* Only unwind in reverse order, required so that the per-context list
	 * is kept in seqno/ring order.
	 */
	GEM_BUG_ON(!request->global_seqno);
	GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
	engine->timeline->seqno--;

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = 0;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_cancel_signaling(request);
	spin_unlock(&request->lock);

	/* Transfer back from the global per-engine timeline to per-context */
	timeline = request->timeline;
	GEM_BUG_ON(timeline == engine->timeline);

	spin_lock(&timeline->lock);
	list_move(&request->link, &timeline->requests);
	spin_unlock(&timeline->lock);

	/* We don't need to wake_up any waiters on request->execute, they
	 * will get woken by any other event or us re-adding this request
	 * to the engine timeline (__i915_gem_request_submit()). The waiters
	 * should be quite adept at finding that the request now has a new
	 * global_seqno compared to the one they went to sleep on.
	 */
}

void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline->lock, flags);

	__i915_gem_request_unsubmit(request);

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
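/*
 * i915_sw_fence callback for request->submit: once all submit dependencies
 * have been signaled, hand the request to the engine backend, and drop the
 * fence-chain reference when the sw_fence itself is freed. The RCU read-side
 * section around ->submit_request() lets i915_gem_set_wedged() synchronise
 * against any in-progress submission when it swaps out the callback.
 */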
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct drm_i915_gem_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		trace_i915_gem_request_submit(request);
		/*
		 * We need to serialize use of the submit_request() callback with its
		 * hotplugging performed during an emergency i915_gem_set_wedged().
		 * We use the RCU mechanism to mark the critical section in order to
		 * force i915_gem_set_wedged() to wait until the submit_request() is
		 * completed before proceeding.
		 */
		rcu_read_lock();
		request->engine->submit_request(request);
		rcu_read_unlock();
		break;

	case FENCE_FREE:
		i915_gem_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}
/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_request *req;
	struct intel_ring *ring;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/*
	 * Preempt contexts are reserved for exclusive use to inject a
	 * preemption context switch. They are never to be used for any trivial
	 * request!
	 */
	GEM_BUG_ON(ctx == dev_priv->preempt_context);

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return ERR_PTR(-EIO);

	/* Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ring = engine->context_pin(engine, ctx);
	if (IS_ERR(ring))
		return ERR_CAST(ring);
	GEM_BUG_ON(!ring);

	ret = reserve_engine(engine);
	if (ret)
		goto err_unpin;

	ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
	if (ret)
		goto err_unreserve;

	/* Move the oldest request to the slab-cache (if not in use!) */
	req = list_first_entry_or_null(&engine->timeline->requests,
				       typeof(*req), link);
	if (req && i915_gem_request_completed(req))
		i915_gem_request_retire(req);

	/* Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is the request we are writing to here, may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to and matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	req = kmem_cache_alloc(dev_priv->requests,
			       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (unlikely(!req)) {
		/* Ratelimit ourselves to prevent oom from malicious clients */
		ret = i915_gem_wait_for_idle(dev_priv,
					     I915_WAIT_LOCKED |
					     I915_WAIT_INTERRUPTIBLE);
		if (ret)
			goto err_unreserve;

		req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto err_unreserve;
		}
	}

	req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
	GEM_BUG_ON(req->timeline == engine->timeline);

	spin_lock_init(&req->lock);
	dma_fence_init(&req->fence,
		       &i915_fence_ops,
		       &req->lock,
		       req->timeline->fence_context,
		       timeline_get_seqno(req->timeline));

	/* We bump the ref for the fence chain */
	i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
	init_waitqueue_head(&req->execute);

	i915_priotree_init(&req->priotree);

	INIT_LIST_HEAD(&req->active_list);
	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = ctx;
	req->ring = ring;

	/* No zalloc, must clear what we need by hand */
	req->global_seqno = 0;
	req->file_priv = NULL;
	req->batch = NULL;
	req->capture_list = NULL;
	req->waitboost = false;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
	GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);

	/*
	 * Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	req->head = req->ring->emit;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = engine->emit_flush(req, EMIT_INVALIDATE);
	if (ret)
		goto err_unwind;

	ret = engine->request_alloc(req);
	if (ret)
		goto err_unwind;

	/* Check that we didn't interrupt ourselves with a new request */
	GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
	return req;

err_unwind:
	req->ring->emit = req->head;

	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&req->active_list));
	GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
	GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));

	kmem_cache_free(dev_priv->requests, req);
err_unreserve:
	unreserve_engine(engine);
err_unpin:
	engine->context_unpin(engine, ctx);
	return ERR_PTR(ret);
}
static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
			       struct drm_i915_gem_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);
	GEM_BUG_ON(to->timeline == from->timeline);

	if (i915_gem_request_completed(from))
		return 0;

	if (to->engine->schedule) {
		ret = i915_priotree_add_dependency(to->i915,
						   &to->priotree,
						   &from->priotree);
		if (ret < 0)
			return ret;
	}

	if (to->engine == from->engine) {
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	if (to->engine->semaphore.sync_to) {
		u32 seqno;

		GEM_BUG_ON(!from->engine->semaphore.signal);

		seqno = i915_gem_request_global_seqno(from);
		if (!seqno)
			goto await_dma_fence;

		if (seqno <= to->timeline->global_sync[from->engine->id])
			return 0;

		trace_i915_gem_ring_sync_to(to, from);
		ret = to->engine->semaphore.sync_to(to, from);
		if (ret)
			return ret;

		to->timeline->global_sync[from->engine->id] = seqno;
		return 0;
	}

await_dma_fence:
	ret = i915_sw_fence_await_dma_fence(&to->submit,
					    &from->fence, 0,
					    GFP_KERNEL);
	return ret < 0 ? ret : 0;
}
int
i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
				 struct dma_fence *fence)
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	/* Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			continue;

		/*
		 * Requests on the same timeline are explicitly ordered, along
		 * with their dependencies, by i915_add_request() which ensures
		 * that requests are submitted in-order through each ring.
		 */
		if (fence->context == req->fence.context)
			continue;

		/* Squash repeated waits to the same timelines */
		if (fence->context != req->i915->mm.unordered_timeline &&
		    intel_timeline_sync_is_later(req->timeline, fence))
			continue;

		if (dma_fence_is_i915(fence))
			ret = i915_gem_request_await_request(req,
							     to_request(fence));
		else
			ret = i915_sw_fence_await_dma_fence(&req->submit, fence,
							    I915_FENCE_TIMEOUT,
							    GFP_KERNEL);
		if (ret < 0)
			return ret;

		/* Record the latest fence used against each timeline */
		if (fence->context != req->i915->mm.unordered_timeline)
			intel_timeline_sync_set(req->timeline, fence);
	} while (--nchild);

	return 0;
}
/**
 * i915_gem_request_await_object - set this request to (async) wait upon a bo
 *
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_gem_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_gem_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}
/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_ring *ring = request->ring;
	struct intel_timeline *timeline = request->timeline;
	struct drm_i915_gem_request *prev;
	u32 *cs;
	int err;

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	trace_i915_gem_request_add(request);

	/* Make sure that no request gazumped us - if it was allocated after
	 * our i915_gem_request_alloc() and called __i915_add_request() before
	 * us, the timeline will hold its seqno which is later than ours.
	 */
	GEM_BUG_ON(timeline->seqno != request->fence.seqno);

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request->reserved_space = 0;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		err = engine->emit_flush(request, EMIT_FLUSH);

		/* Not allowed to fail! */
		WARN(err, "engine->emit_flush() failed: %d!\n", err);
	}

	/* Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
	GEM_BUG_ON(IS_ERR(cs));
	request->postfix = intel_ring_offset(request, cs);

	/* Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */

	prev = i915_gem_active_raw(&timeline->last_request,
				   &request->i915->drm.struct_mutex);
	if (prev) {
		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
					     &request->submitq);
		if (engine->schedule)
			__i915_priotree_add_dependency(&request->priotree,
						       &prev->priotree,
						       &request->dep,
						       0);
	}

	spin_lock_irq(&timeline->lock);
	list_add_tail(&request->link, &timeline->requests);
	spin_unlock_irq(&timeline->lock);

	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
	i915_gem_active_set(&timeline->last_request, request);

	list_add_tail(&request->ring_link, &ring->request_list);
	request->emitted_jiffies = jiffies;

	/* Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
	if (engine->schedule)
		engine->schedule(request, request->ctx->priority);

	local_bh_disable();
	i915_sw_fence_commit(&request->submit);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
}
static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt to the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}
static bool __i915_spin_request(const struct drm_i915_gem_request *req,
				u32 seqno, int state, unsigned long timeout_us)
{
	struct intel_engine_cs *engine = req->engine;
	unsigned int irq, cpu;

	GEM_BUG_ON(!seqno);

	/*
	 * Only wait for the request if we know it is likely to complete.
	 *
	 * We don't track the timestamps around requests, nor the average
	 * request length, so we do not have a good indicator that this
	 * request will complete within the timeout. What we do know is the
	 * order in which requests are executed by the engine and so we can
	 * tell if the request has started. If the request hasn't started yet,
	 * it is a fair assumption that it will not complete within our
	 * relatively short timeout.
	 */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
		return false;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quick as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */

	irq = atomic_read(&engine->irq_count);
	timeout_us += local_clock_us(&cpu);
	do {
		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
			return seqno == i915_gem_request_global_seqno(req);

		/* Seqno are meant to be ordered *before* the interrupt. If
		 * we see an interrupt without a corresponding seqno advance,
		 * assume we won't see one in the near future but require
		 * the engine->seqno_barrier() to fixup coherency.
		 */
		if (atomic_read(&engine->irq_count) != irq)
			break;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax();
	} while (!need_resched());

	return false;
}
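/*
 * If a pending GPU reset has been handed off to the waiter, perform the
 * reset here rather than waiting for the dedicated worker, and report back
 * to the caller so that it re-checks the request for completion.
 */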
static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request)
{
	if (likely(!i915_reset_handoff(&request->i915->gpu_error)))
		return false;

	__set_current_state(TASK_RUNNING);
	i915_reset(request->i915, 0);
	return true;
}
/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_wait_request() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
 * in via the flags, and vice versa if the struct_mutex is not held, the caller
 * must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_wait_request(struct drm_i915_gem_request *req,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue;
	DEFINE_WAIT_FUNC(reset, default_wake_function);
	DEFINE_WAIT_FUNC(exec, default_wake_function);
	struct intel_wait wait;

	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	if (i915_gem_request_completed(req))
		return timeout;

	if (!timeout)
		return -ETIME;

	trace_i915_gem_request_wait_begin(req, flags);

	add_wait_queue(&req->execute, &exec);
	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(errq, &reset);

	intel_wait_init(&wait, req);

restart:
	do {
		set_current_state(state);
		if (intel_wait_update_request(&wait, req))
			break;

		if (flags & I915_WAIT_LOCKED &&
		    __i915_wait_request_check_and_reset(req))
			continue;

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			goto complete;
		}

		if (!timeout) {
			timeout = -ETIME;
			goto complete;
		}

		timeout = io_schedule_timeout(timeout);
	} while (1);

	GEM_BUG_ON(!intel_wait_has_seqno(&wait));
	GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));

	/* Optimistic short spin before touching IRQs */
	if (__i915_spin_request(req, wait.seqno, state, 5))
		goto complete;

	set_current_state(state);
	if (intel_engine_add_wait(req->engine, &wait))
		/* In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	if (flags & I915_WAIT_LOCKED)
		__i915_wait_request_check_and_reset(req);

	for (;;) {
		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);

		if (intel_wait_complete(&wait) &&
		    intel_wait_check_request(&wait, req))
			break;

		set_current_state(state);

wakeup:
		/* Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(req))
			break;

		/* If the GPU is hung, and we hold the lock, reset the GPU
		 * and then check for completion. On a full reset, the engine's
		 * HW seqno will be advanced past us and we are complete.
		 * If we do a partial reset, we have to wait for the GPU to
		 * resume and update the breadcrumb.
		 *
		 * If we don't hold the mutex, we can just wait for the worker
		 * to come along and update the breadcrumb (either directly
		 * itself, or indirectly by recovering the GPU).
		 */
		if (flags & I915_WAIT_LOCKED &&
		    __i915_wait_request_check_and_reset(req))
			continue;

		/* Only spin if we know the GPU is processing this request */
		if (__i915_spin_request(req, wait.seqno, state, 2))
			break;

		if (!intel_wait_check_request(&wait, req)) {
			intel_engine_remove_wait(req->engine, &wait);
			goto restart;
		}
	}

	intel_engine_remove_wait(req->engine, &wait);
complete:
	__set_current_state(TASK_RUNNING);
	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(errq, &reset);
	remove_wait_queue(&req->execute, &exec);
	trace_i915_gem_request_wait_end(req);

	return timeout;
}
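/*
 * Retire, in order, every request on this engine whose global seqno has been
 * passed by the HW. The requests are first moved onto a local list under the
 * timeline lock so that the retirement callbacks run outside the spinlock.
 */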
static void engine_retire_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request, *next;
	u32 seqno = intel_engine_get_seqno(engine);
	LIST_HEAD(retire);

	spin_lock_irq(&engine->timeline->lock);
	list_for_each_entry_safe(request, next,
				 &engine->timeline->requests, link) {
		if (!i915_seqno_passed(seqno, request->global_seqno))
			break;

		list_move_tail(&request->link, &retire);
	}
	spin_unlock_irq(&engine->timeline->lock);

	list_for_each_entry_safe(request, next, &retire, link)
		i915_gem_request_retire(request);
}
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (!dev_priv->gt.active_requests)
		return;

	for_each_engine(engine, dev_priv, id)
		engine_retire_requests(engine);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_gem_request.c"
#endif