/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "i915_drv.h"
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	/*
	 * The timeline struct (as part of the ppgtt underneath a context)
	 * may be freed when the request is no longer in use by the GPU.
	 * We could extend the life of a context to beyond that of all
	 * fences, possibly keeping the hw resource around indefinitely,
	 * or we just give them a false name. Since
	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
	 * lie seems justifiable.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return "signaled";

	return to_request(fence)->timeline->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_request_completed(to_request(fence));
}
static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	return intel_engine_enable_signaling(to_request(fence), true);
}

static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_request_wait(to_request(fence), interruptible, timeout);
}
static void i915_fence_release(struct dma_fence *fence)
{
	struct i915_request *rq = to_request(fence);

	/*
	 * The request is put onto an RCU freelist (i.e. the address
	 * is immediately reused), mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	 */
	i915_sw_fence_fini(&rq->submit);

	kmem_cache_free(rq->i915->requests, rq);
}
const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};
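
/*
 * Illustrative sketch only: once a request's fence has been initialised
 * against &i915_fence_ops (see the dma_fence_init() call in
 * i915_request_alloc() below), generic users reach the callbacks above
 * through the core dma-fence helpers, e.g.:
 *
 *	struct dma_fence *fence = &rq->fence;
 *
 *	if (!dma_fence_is_signaled(fence))	// invokes .signaled
 *		dma_fence_wait(fence, true);	// may invoke .enable_signaling and .wait
 *	dma_fence_put(fence);			// the final reference ends in .release
 */
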
static void
i915_request_remove_from_client(struct i915_request *request)
{
	struct drm_i915_file_private *file_priv;

	file_priv = request->file_priv;
	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_link);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}
static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct drm_i915_private *i915,
		     struct i915_dependency *dep)
{
	kmem_cache_free(i915->dependencies, dep);
}
static void
__i915_sched_node_add_dependency(struct i915_sched_node *node,
				 struct i915_sched_node *signal,
				 struct i915_dependency *dep,
				 unsigned long flags)
{
	INIT_LIST_HEAD(&dep->dfs_link);
	list_add(&dep->wait_link, &signal->waiters_list);
	list_add(&dep->signal_link, &node->signalers_list);
	dep->signaler = signal;
	dep->flags = flags;
}
static int
i915_sched_node_add_dependency(struct drm_i915_private *i915,
			       struct i915_sched_node *node,
			       struct i915_sched_node *signal)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc(i915);
	if (!dep)
		return -ENOMEM;

	__i915_sched_node_add_dependency(node, signal, dep,
					 I915_DEPENDENCY_ALLOC);
	return 0;
}
static void
i915_sched_node_fini(struct drm_i915_private *i915,
		     struct i915_sched_node *node)
{
	struct i915_dependency *dep, *tmp;

	GEM_BUG_ON(!list_empty(&node->link));

	/*
	 * Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
		GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
		GEM_BUG_ON(dep->signaler != node);
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}
}
static void
i915_sched_node_init(struct i915_sched_node *node)
{
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);
	node->attr.priority = I915_PRIORITY_INVALID;
}
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
	struct intel_engine_cs *engine;
	struct i915_timeline *timeline;
	enum intel_engine_id id;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	ret = i915_gem_wait_for_idle(i915,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED,
				     MAX_SCHEDULE_TIMEOUT);
	if (ret)
		return ret;

	GEM_BUG_ON(i915->gt.active_requests);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	for_each_engine(engine, i915, id) {
		GEM_TRACE("%s seqno %d (current %d) -> %d\n",
			  engine->name,
			  engine->timeline.seqno,
			  intel_engine_get_seqno(engine),
			  seqno);

		if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
			/* Flush any waiters before we reuse the seqno */
			intel_engine_disarm_breadcrumbs(engine);
			intel_engine_init_hangcheck(engine);
			GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
		}

		/* Check we are idle before we fiddle with hw state! */
		GEM_BUG_ON(!intel_engine_is_idle(engine));
		GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));

		/* Finally reset hw state */
		intel_engine_init_global_seqno(engine, seqno);
		engine->timeline.seqno = seqno;
	}

	list_for_each_entry(timeline, &i915->gt.timelines, link)
		memset(timeline->global_sync, 0, sizeof(timeline->global_sync));

	i915->gt.request_serial = seqno;

	return 0;
}
int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *i915 = to_i915(dev);

	lockdep_assert_held(&i915->drm.struct_mutex);

	/* HWS page needs to be set less than what we will inject to ring */
	return reset_all_global_seqno(i915, seqno - 1);
}
static int reserve_gt(struct drm_i915_private *i915)
{
	int ret;

	/*
	 * Reservation is fine until we may need to wrap around
	 *
	 * By incrementing the serial for every request, we know that no
	 * individual engine may exceed that serial (as each is reset to 0
	 * on any wrap). This protects even the most pessimistic of migrations
	 * of every request from all engines onto just one.
	 */
	while (unlikely(++i915->gt.request_serial == 0)) {
		ret = reset_all_global_seqno(i915, 0);
		if (ret) {
			i915->gt.request_serial--;
			return ret;
		}
	}

	if (!i915->gt.active_requests++)
		i915_gem_unpark(i915);

	return 0;
}
static void unreserve_gt(struct drm_i915_private *i915)
{
	GEM_BUG_ON(!i915->gt.active_requests);
	if (!--i915->gt.active_requests)
		i915_gem_park(i915);
}
void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct i915_request *request)
{
	/* Space left intentionally blank */
}
static void advance_ring(struct i915_request *request)
{
	struct intel_ring *ring = request->ring;
	unsigned int tail;

	/*
	 * We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
	if (list_is_last(&request->ring_link, &ring->request_list)) {
		/*
		 * We may race here with execlists resubmitting this request
		 * as we retire it. The resubmission will move the ring->tail
		 * forwards (to request->wa_tail). We either read the
		 * current value that was written to hw, or the value that
		 * is just about to be. Either works, if we miss the last two
		 * noops - they are safe to be replayed on a reset.
		 */
		GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
		tail = READ_ONCE(request->tail);
		list_del(&ring->active_link);
	} else {
		tail = request->postfix;
	}
	list_del_init(&request->ring_link);

	ring->head = tail;
}
static void free_capture_list(struct i915_request *request)
{
	struct i915_capture_list *capture;

	capture = request->capture_list;
	while (capture) {
		struct i915_capture_list *next = capture->next;

		kfree(capture);
		capture = next;
	}
}
static void __retire_engine_request(struct intel_engine_cs *engine,
				    struct i915_request *rq)
{
	GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n",
		  __func__, engine->name,
		  rq->fence.context, rq->fence.seqno,
		  rq->global_seqno,
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!i915_request_completed(rq));

	local_irq_disable();

	spin_lock(&engine->timeline.lock);
	GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
	list_del_init(&rq->link);
	spin_unlock(&engine->timeline.lock);

	spin_lock(&rq->lock);
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
		dma_fence_signal_locked(&rq->fence);
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
		intel_engine_cancel_signaling(rq);
	if (rq->waitboost) {
		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
	}
	spin_unlock(&rq->lock);

	local_irq_enable();

	/*
	 * The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. However, since we
	 * cannot take the required locks at i915_request_submit() we
	 * defer the unpinning of the active context to now, retirement of
	 * the subsequent request.
	 */
	if (engine->last_retired_context)
		intel_context_unpin(engine->last_retired_context);
	engine->last_retired_context = rq->hw_context;
}
static void __retire_engine_upto(struct intel_engine_cs *engine,
				 struct i915_request *rq)
{
	struct i915_request *tmp;

	if (list_empty(&rq->link))
		return;

	do {
		tmp = list_first_entry(&engine->timeline.requests,
				       typeof(*tmp), link);

		GEM_BUG_ON(tmp->engine != engine);
		__retire_engine_request(engine, tmp);
	} while (tmp != rq);
}
static void i915_request_retire(struct i915_request *request)
{
	struct i915_gem_active *active, *next;

	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
		  request->engine->name,
		  request->fence.context, request->fence.seqno,
		  request->global_seqno,
		  intel_engine_get_seqno(request->engine));

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
	GEM_BUG_ON(!i915_request_completed(request));

	trace_i915_request_retire(request);

	advance_ring(request);
	free_capture_list(request);

	/*
	 * Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/*
		 * In microbenchmarks or focusing upon time inside the kernel,
		 * we may spend an inordinate amount of time simply handling
		 * the retirement of requests and processing their callbacks.
		 * Of which, this loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_request_remove_from_client(request);

	/* Retirement decays the ban score as it is a sign of ctx progress */
	atomic_dec_if_positive(&request->gem_context->ban_score);
	intel_context_unpin(request->hw_context);

	__retire_engine_upto(request->engine, request);

	unreserve_gt(request->i915);

	i915_sched_node_fini(request->i915, &request->sched);
	i915_request_put(request);
}
void i915_request_retire_upto(struct i915_request *rq)
{
	struct intel_ring *ring = rq->ring;
	struct i915_request *tmp;

	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
		  rq->engine->name,
		  rq->fence.context, rq->fence.seqno,
		  rq->global_seqno,
		  intel_engine_get_seqno(rq->engine));

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_request_completed(rq));

	if (list_empty(&rq->ring_link))
		return;

	do {
		tmp = list_first_entry(&ring->request_list,
				       typeof(*tmp), ring_link);

		i915_request_retire(tmp);
	} while (tmp != rq);
}
static u32 timeline_get_seqno(struct i915_timeline *tl)
{
	return ++tl->seqno;
}
static void move_to_timeline(struct i915_request *request,
			     struct i915_timeline *timeline)
{
	GEM_BUG_ON(request->timeline == &request->engine->timeline);
	lockdep_assert_held(&request->engine->timeline.lock);

	spin_lock(&request->timeline->lock);
	list_move_tail(&request->link, &timeline->requests);
	spin_unlock(&request->timeline->lock);
}
void __i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	u32 seqno;

	GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
		  engine->name,
		  request->fence.context, request->fence.seqno,
		  engine->timeline.seqno + 1,
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline.lock);

	GEM_BUG_ON(request->global_seqno);

	seqno = timeline_get_seqno(&engine->timeline);
	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = seqno;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_enable_signaling(request, false);
	spin_unlock(&request->lock);

	engine->emit_breadcrumb(request,
				request->ring->vaddr + request->postfix);

	/* Transfer from per-context onto the global per-engine timeline */
	move_to_timeline(request, &engine->timeline);

	trace_i915_request_execute(request);

	wake_up_all(&request->execute);
}
void i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline.lock, flags);

	__i915_request_submit(request);

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
void __i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;

	GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n",
		  engine->name,
		  request->fence.context, request->fence.seqno,
		  request->global_seqno,
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline.lock);

	/*
	 * Only unwind in reverse order, required so that the per-context list
	 * is kept in seqno/ring order.
	 */
	GEM_BUG_ON(!request->global_seqno);
	GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine),
				     request->global_seqno));
	engine->timeline.seqno--;

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = 0;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_cancel_signaling(request);
	spin_unlock(&request->lock);

	/* Transfer back from the global per-engine timeline to per-context */
	move_to_timeline(request, request->timeline);

	/*
	 * We don't need to wake_up any waiters on request->execute, they
	 * will get woken by any other event or us re-adding this request
	 * to the engine timeline (__i915_request_submit()). The waiters
	 * should be quite adept at finding that the request now has a new
	 * global_seqno to the one they went to sleep on.
	 */
}
void i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline.lock, flags);

	__i915_request_unsubmit(request);

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		trace_i915_request_submit(request);
		/*
		 * We need to serialize use of the submit_request() callback
		 * with its hotplugging performed during an emergency
		 * i915_gem_set_wedged(). We use the RCU mechanism to mark the
		 * critical section in order to force i915_gem_set_wedged() to
		 * wait until the submit_request() is completed before
		 * proceeding.
		 */
		rcu_read_lock();
		request->engine->submit_request(request);
		rcu_read_unlock();
		break;

	case FENCE_FREE:
		i915_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}
/**
 * i915_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct i915_request *
i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_request *rq;
	struct intel_context *ce;
	int ret;

	lockdep_assert_held(&i915->drm.struct_mutex);
	/*
	 * Preempt contexts are reserved for exclusive use to inject a
	 * preemption context switch. They are never to be used for any trivial
	 * request!
	 */
	GEM_BUG_ON(ctx == i915->preempt_context);

	/*
	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	if (i915_terminally_wedged(&i915->gpu_error))
		return ERR_PTR(-EIO);

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ce = intel_context_pin(ctx, engine);
	if (IS_ERR(ce))
		return ERR_CAST(ce);

	ret = reserve_gt(i915);
	if (ret)
		goto err_unpin;

	ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
	if (ret)
		goto err_unreserve;

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
	if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
	    i915_request_completed(rq))
		i915_request_retire(rq);
	/*
	 * Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is the request we are writing to here, may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
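	/*
	 * For reference, the reader side described above looks roughly like
	 * the sketch below (illustrative only; the real lookup lives in
	 * __i915_gem_active_get_rcu()):
	 *
	 *	rcu_read_lock();
	 *	rq = rcu_dereference(active->request);
	 *	if (rq && !i915_request_completed(rq))
	 *		rq = i915_request_get_rcu(rq);	// may grab a recycled rq
	 *	if (rq && rq != rcu_access_pointer(active->request))
	 *		... raced with reuse: drop the reference and restart ...
	 *	rcu_read_unlock();
	 */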
	rq = kmem_cache_alloc(i915->requests,
			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (unlikely(!rq)) {
		/* Ratelimit ourselves to prevent oom from malicious clients */
		ret = i915_gem_wait_for_idle(i915,
					     I915_WAIT_LOCKED |
					     I915_WAIT_INTERRUPTIBLE,
					     MAX_SCHEDULE_TIMEOUT);
		if (ret)
			goto err_unreserve;

		/*
		 * We've forced the client to stall and catch up with whatever
		 * backlog there might have been. As we are assuming that we
		 * caused the mempressure, now is an opportune time to
		 * recover as much memory from the request pool as is possible.
		 * Having already penalized the client to stall, we spend
		 * a little extra time to re-optimise page allocation.
		 */
		kmem_cache_shrink(i915->requests);
		rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */

		rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
		if (!rq) {
			ret = -ENOMEM;
			goto err_unreserve;
		}
	}
	INIT_LIST_HEAD(&rq->active_list);
	rq->i915 = i915;
	rq->engine = engine;
	rq->gem_context = ctx;
	rq->hw_context = ce;
	rq->ring = ce->ring;
	rq->timeline = ce->ring->timeline;
	GEM_BUG_ON(rq->timeline == &engine->timeline);

	spin_lock_init(&rq->lock);
	dma_fence_init(&rq->fence,
		       &i915_fence_ops,
		       &rq->lock,
		       rq->timeline->fence_context,
		       timeline_get_seqno(rq->timeline));

	/* We bump the ref for the fence chain */
	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
	init_waitqueue_head(&rq->execute);

	i915_sched_node_init(&rq->sched);

	/* No zalloc, must clear what we need by hand */
	rq->global_seqno = 0;
	rq->signaling.wait.seqno = 0;
	rq->file_priv = NULL;
	rq->capture_list = NULL;
	rq->waitboost = false;
	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_request_add() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
	GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);

	/*
	 * Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	rq->head = rq->ring->emit;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
	if (ret)
		goto err_unwind;

	ret = engine->request_alloc(rq);
	if (ret)
		goto err_unwind;

	/* Keep a second pin for the dual retirement along engine and ring */
	__intel_context_pin(ce);

	rq->infix = rq->ring->emit; /* end of header; start of user payload */

	/* Check that we didn't interrupt ourselves with a new request */
	GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
	return rq;
err_unwind:
	ce->ring->emit = rq->head;

	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&rq->active_list));
	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));

	kmem_cache_free(i915->requests, rq);
err_unreserve:
	unreserve_gt(i915);
err_unpin:
	intel_context_unpin(ce);
	return ERR_PTR(ret);
}
static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);
	GEM_BUG_ON(to->timeline == from->timeline);

	if (i915_request_completed(from))
		return 0;

	if (to->engine->schedule) {
		ret = i915_sched_node_add_dependency(to->i915,
						     &to->sched,
						     &from->sched);
		if (ret < 0)
			return ret;
	}

	if (to->engine == from->engine) {
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       I915_FENCE_GFP);
		return ret < 0 ? ret : 0;
	}

	if (to->engine->semaphore.sync_to) {
		u32 seqno;

		GEM_BUG_ON(!from->engine->semaphore.signal);

		seqno = i915_request_global_seqno(from);
		if (!seqno)
			goto await_dma_fence;

		if (seqno <= to->timeline->global_sync[from->engine->id])
			return 0;

		trace_i915_gem_ring_sync_to(to, from);
		ret = to->engine->semaphore.sync_to(to, from);
		if (ret)
			return ret;

		to->timeline->global_sync[from->engine->id] = seqno;
		return 0;
	}

await_dma_fence:
	ret = i915_sw_fence_await_dma_fence(&to->submit,
					    &from->fence, 0,
					    I915_FENCE_GFP);
	return ret < 0 ? ret : 0;
}
int
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	/*
	 * Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			continue;

		/*
		 * Requests on the same timeline are explicitly ordered, along
		 * with their dependencies, by i915_request_add() which ensures
		 * that requests are submitted in-order through each ring.
		 */
		if (fence->context == rq->fence.context)
			continue;

		/* Squash repeated waits to the same timelines */
		if (fence->context != rq->i915->mm.unordered_timeline &&
		    i915_timeline_sync_is_later(rq->timeline, fence))
			continue;

		if (dma_fence_is_i915(fence))
			ret = i915_request_await_request(rq, to_request(fence));
		else
			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
							    I915_FENCE_TIMEOUT,
							    I915_FENCE_GFP);
		if (ret < 0)
			return ret;

		/* Record the latest fence used against each timeline */
		if (fence->context != rq->i915->mm.unordered_timeline)
			i915_timeline_sync_set(rq->timeline, fence);
	} while (--nchild);

	return 0;
}
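
/*
 * Usage sketch (illustrative only): execbuf-style code that must not start
 * until an explicit fence provided by userspace has signaled might do:
 *
 *	err = i915_request_await_dma_fence(rq, in_fence);
 *	if (err < 0)
 *		return err;
 *
 * where "in_fence" (a hypothetical local here) may be a plain dma_fence or a
 * fence-array obtained from a sync_file; the decomposition above handles
 * either form.
 */
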
/**
 * i915_request_await_object - set this request to (async) wait upon a bo
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_request_await_object(struct i915_request *to,
			  struct drm_i915_gem_object *obj,
			  bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);

		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}
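
/*
 * Usage sketch (illustrative only): before emitting commands that touch an
 * object, a caller orders the new request behind the object's outstanding
 * work, e.g. for a writer:
 *
 *	err = i915_request_await_object(rq, obj, true);
 *	if (err)
 *		goto err_request;	// hypothetical unwind label
 *
 * A pure reader passes write=false and then only waits upon the exclusive
 * (write) fence, per the rules documented above.
 */
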
void i915_request_skip(struct i915_request *rq, int error)
{
	void *vaddr = rq->ring->vaddr;
	u32 head;

	GEM_BUG_ON(!IS_ERR_VALUE((long)error));
	dma_fence_set_error(&rq->fence, error);

	/*
	 * As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	head = rq->infix;
	if (rq->postfix < head) {
		memset(vaddr + head, 0, rq->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, 0, rq->postfix - head);
}
/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void i915_request_add(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct i915_timeline *timeline = request->timeline;
	struct intel_ring *ring = request->ring;
	struct i915_request *prev;
	u32 *cs;

	GEM_TRACE("%s fence %llx:%d\n",
		  engine->name, request->fence.context, request->fence.seqno);

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	trace_i915_request_add(request);

	/*
	 * Make sure that no request gazumped us - if it was allocated after
	 * our i915_request_alloc() and called __i915_request_add() before
	 * us, the timeline will hold its seqno which is later than ours.
	 */
	GEM_BUG_ON(timeline->seqno != request->fence.seqno);

	/*
	 * To ensure that this call will not fail, space for its emission
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request->reserved_space = 0;
	engine->emit_flush(request, EMIT_FLUSH);

	/*
	 * Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
	GEM_BUG_ON(IS_ERR(cs));
	request->postfix = intel_ring_offset(request, cs);
	/*
	 * Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */

	prev = i915_gem_active_raw(&timeline->last_request,
				   &request->i915->drm.struct_mutex);
	if (prev && !i915_request_completed(prev)) {
		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
					     &request->submitq);
		if (engine->schedule)
			__i915_sched_node_add_dependency(&request->sched,
							 &prev->sched,
							 &request->dep,
							 0);
	}

	spin_lock_irq(&timeline->lock);
	list_add_tail(&request->link, &timeline->requests);
	spin_unlock_irq(&timeline->lock);

	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
	i915_gem_active_set(&timeline->last_request, request);

	list_add_tail(&request->ring_link, &ring->request_list);
	if (list_is_first(&request->ring_link, &ring->request_list)) {
		GEM_TRACE("marking %s as active\n", ring->timeline->name);
		list_add(&ring->active_link, &request->i915->gt.active_rings);
	}
	request->emitted_jiffies = jiffies;

	/*
	 * Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
	local_bh_disable();
	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	if (engine->schedule)
		engine->schedule(request, &request->gem_context->sched);
	rcu_read_unlock();
	i915_sw_fence_commit(&request->submit);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */

	/*
	 * In typical scenarios, we do not expect the previous request on
	 * the timeline to be still tracked by timeline->last_request if it
	 * has been completed. If the completed request is still here, that
	 * implies that request retirement is a long way behind submission,
	 * suggesting that we haven't been retiring frequently enough from
	 * the combination of retire-before-alloc, waiters and the background
	 * retirement worker. So if the last request on this timeline was
	 * already completed, do a catch up pass, flushing the retirement queue
	 * up to this client. Since we have now moved the heaviest operations
	 * during retirement onto secondary workers, such as freeing objects
	 * or contexts, retiring a bunch of requests is mostly list management
	 * (and cache misses), and so we should not be overly penalizing this
	 * client by performing excess work, though we may still be performing
	 * work on behalf of others -- but instead we should benefit from
	 * improved resource management. (Well, that's the theory at least.)
	 */
	if (prev && i915_request_completed(prev))
		i915_request_retire_upto(prev);
}
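
/*
 * Putting the pieces together, a typical submission path looks roughly like
 * the sketch below (illustrative only, error handling mostly elided):
 *
 *	rq = i915_request_alloc(engine, ctx);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = i915_request_await_object(rq, obj, false);
 *	... emit commands into the ring via intel_ring_begin(rq, ...) ...
 *
 *	i915_request_add(rq);	// seals the request; not allowed to fail
 *
 * After i915_request_add() the request belongs to the scheduler/hardware and
 * the caller may only wait upon or retire it.
 */
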
static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/*
	 * Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}
static bool __i915_spin_request(const struct i915_request *rq,
				u32 seqno, int state, unsigned long timeout_us)
{
	struct intel_engine_cs *engine = rq->engine;
	unsigned int irq, cpu;

	/*
	 * Only wait for the request if we know it is likely to complete.
	 *
	 * We don't track the timestamps around requests, nor the average
	 * request length, so we do not have a good indicator that this
	 * request will complete within the timeout. What we do know is the
	 * order in which requests are executed by the engine and so we can
	 * tell if the request has started. If the request hasn't started yet,
	 * it is a fair assumption that it will not complete within our
	 * relatively short timeout.
	 */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
		return false;

	/*
	 * When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */

	irq = READ_ONCE(engine->breadcrumbs.irq_count);
	timeout_us += local_clock_us(&cpu);
	do {
		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
			return seqno == i915_request_global_seqno(rq);

		/*
		 * Seqno are meant to be ordered *before* the interrupt. If
		 * we see an interrupt without a corresponding seqno advance,
		 * assume we won't see one in the near future but require
		 * the engine->seqno_barrier() to fixup coherency.
		 */
		if (READ_ONCE(engine->breadcrumbs.irq_count) != irq)
			break;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax();
	} while (!need_resched());

	return false;
}
static bool __i915_wait_request_check_and_reset(struct i915_request *request)
{
	struct i915_gpu_error *error = &request->i915->gpu_error;

	if (likely(!i915_reset_handoff(error)))
		return false;

	__set_current_state(TASK_RUNNING);
	i915_reset(request->i915, error->stalled_mask, error->reason);
	return true;
}
/**
 * i915_request_wait - wait until execution of request has finished
 * @rq: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_request_wait() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
 * in via the flags, and vice versa if the struct_mutex is not held, the caller
 * must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue;
	DEFINE_WAIT_FUNC(reset, default_wake_function);
	DEFINE_WAIT_FUNC(exec, default_wake_function);
	struct intel_wait wait;

#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&rq->i915->drm.struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	if (i915_request_completed(rq))
		return timeout;

	trace_i915_request_wait_begin(rq, flags);
	add_wait_queue(&rq->execute, &exec);
	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(errq, &reset);

	intel_wait_init(&wait);

restart:
	do {
		set_current_state(state);
		if (intel_wait_update_request(&wait, rq))
			break;

		if (flags & I915_WAIT_LOCKED &&
		    __i915_wait_request_check_and_reset(rq))
			continue;

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			goto complete;
		}

		timeout = io_schedule_timeout(timeout);
	} while (1);

	GEM_BUG_ON(!intel_wait_has_seqno(&wait));
	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));

	/* Optimistic short spin before touching IRQs */
	if (__i915_spin_request(rq, wait.seqno, state, 5))
		goto complete;

	set_current_state(state);
	if (intel_engine_add_wait(rq->engine, &wait))
		/*
		 * In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	if (flags & I915_WAIT_LOCKED)
		__i915_wait_request_check_and_reset(rq);

	for (;;) {
		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		timeout = io_schedule_timeout(timeout);

		if (intel_wait_complete(&wait) &&
		    intel_wait_check_request(&wait, rq))
			break;

		set_current_state(state);

wakeup:
		/*
		 * Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(rq))
			break;

		/*
		 * If the GPU is hung, and we hold the lock, reset the GPU
		 * and then check for completion. On a full reset, the engine's
		 * HW seqno will be advanced past us and we are complete.
		 * If we do a partial reset, we have to wait for the GPU to
		 * resume and update the breadcrumb.
		 *
		 * If we don't hold the mutex, we can just wait for the worker
		 * to come along and update the breadcrumb (either directly
		 * itself, or indirectly by recovering the GPU).
		 */
		if (flags & I915_WAIT_LOCKED &&
		    __i915_wait_request_check_and_reset(rq))
			continue;

		/* Only spin if we know the GPU is processing this request */
		if (__i915_spin_request(rq, wait.seqno, state, 2))
			break;

		if (!intel_wait_check_request(&wait, rq)) {
			intel_engine_remove_wait(rq->engine, &wait);
			goto restart;
		}
	}

	intel_engine_remove_wait(rq->engine, &wait);
complete:
	__set_current_state(TASK_RUNNING);
	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(errq, &reset);
	remove_wait_queue(&rq->execute, &exec);
	trace_i915_request_wait_end(rq);

	return timeout;
}
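
/*
 * Usage sketch (illustrative only): a locked, interruptible wait bounded to
 * roughly two seconds:
 *
 *	long remaining = i915_request_wait(rq,
 *					   I915_WAIT_LOCKED |
 *					   I915_WAIT_INTERRUPTIBLE,
 *					   msecs_to_jiffies(2000));
 *	if (remaining < 0)
 *		return remaining;	// -ETIME, or -ERESTARTSYS on a signal
 */
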
static void ring_retire_requests(struct intel_ring *ring)
{
	struct i915_request *request, *next;

	list_for_each_entry_safe(request, next,
				 &ring->request_list, ring_link) {
		if (!i915_request_completed(request))
			break;

		i915_request_retire(request);
	}
}
void i915_retire_requests(struct drm_i915_private *i915)
{
	struct intel_ring *ring, *tmp;

	lockdep_assert_held(&i915->drm.struct_mutex);

	if (!i915->gt.active_requests)
		return;

	list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
		ring_retire_requests(ring);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
#endif