/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/dma-fence-array.h>
#include <linux/irq_work.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"
#include "gt/intel_ring.h"
#include "gt/intel_rps.h"

#include "i915_active.h"
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_trace.h"

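/*
 * An execute_cb is attached to a signaling request and fired (via irq_work)
 * once that request begins execution, releasing the i915_sw_fence that is
 * holding back submission of the waiter. See __await_execution() below.
 */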
struct execute_cb {
	struct list_head link;
	struct irq_work work;
	struct i915_sw_fence *fence;
	void (*hook)(struct i915_request *rq, struct dma_fence *signal);
	struct i915_request *signal;
};

static struct i915_global_request {
	struct i915_global base;
	struct kmem_cache *slab_requests;
	struct kmem_cache *slab_dependencies;
	struct kmem_cache *slab_execute_cbs;
} global;

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return dev_name(to_request(fence)->i915->drm.dev);
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	const struct i915_gem_context *ctx;

	/*
	 * The timeline struct (as part of the ppgtt underneath a context)
	 * may be freed when the request is no longer in use by the GPU.
	 * We could extend the life of a context to beyond that of all
	 * fences, possibly keeping the hw resource around indefinitely,
	 * or we just give them a false name. Since
	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
	 * lie seems justifiable.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return "signaled";

	ctx = i915_request_gem_context(to_request(fence));
	if (!ctx)
		return "[" DRIVER_NAME "]";

	return ctx->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	return i915_request_enable_breadcrumb(to_request(fence));
}

static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_request_wait(to_request(fence),
				 interruptible | I915_WAIT_PRIORITY,
				 timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
	struct i915_request *rq = to_request(fence);

	/*
	 * The request is put onto a RCU freelist (i.e. the address
	 * is immediately reused), mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	 */
	i915_sw_fence_fini(&rq->submit);
	i915_sw_fence_fini(&rq->semaphore);

	kmem_cache_free(global.slab_requests, rq);
}

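/*
 * The dma_fence_ops backing every i915_request->fence; dma_fence_is_i915()
 * recognises i915 fences by comparing against these ops.
 */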
const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};

static void irq_execute_cb(struct irq_work *wrk)
{
	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);

	i915_sw_fence_complete(cb->fence);
	kmem_cache_free(global.slab_execute_cbs, cb);
}

static void irq_execute_cb_hook(struct irq_work *wrk)
{
	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);

	cb->hook(container_of(cb->fence, struct i915_request, submit),
		 &cb->signal->fence);
	i915_request_put(cb->signal);

	irq_execute_cb(wrk);
}

static void __notify_execute_cb(struct i915_request *rq)
{
	struct execute_cb *cb;

	lockdep_assert_held(&rq->lock);

	if (list_empty(&rq->execute_cb))
		return;

	list_for_each_entry(cb, &rq->execute_cb, link)
		irq_work_queue(&cb->work);

	/*
	 * XXX Rollback on __i915_request_unsubmit()
	 *
	 * In the future, perhaps when we have an active time-slicing scheduler,
	 * it will be interesting to unsubmit parallel execution and remove
	 * busywaits from the GPU until their master is restarted. This is
	 * quite hairy, we have to carefully rollback the fence and do a
	 * preempt-to-idle cycle on the target engine, all the while the
	 * master execute_cb may refire.
	 */
	INIT_LIST_HEAD(&rq->execute_cb);
}

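/*
 * Unlink the request from its client's list of outstanding requests;
 * the xchg() on request->file_priv ensures only one party performs
 * the list_del().
 */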
static void
remove_from_client(struct i915_request *request)
{
	struct drm_i915_file_private *file_priv;

	if (!READ_ONCE(request->file_priv))
		return;

	rcu_read_lock();
	file_priv = xchg(&request->file_priv, NULL);
	if (file_priv) {
		spin_lock(&file_priv->mm.lock);
		list_del(&request->client_link);
		spin_unlock(&file_priv->mm.lock);
	}
	rcu_read_unlock();
}

static void free_capture_list(struct i915_request *request)
{
	struct i915_capture_list *capture;

	capture = fetch_and_zero(&request->capture_list);
	while (capture) {
		struct i915_capture_list *next = capture->next;

		kfree(capture);
		capture = next;
	}
}

static void remove_from_engine(struct i915_request *rq)
{
	struct intel_engine_cs *engine, *locked;

	/*
	 * Virtual engines complicate acquiring the engine timeline lock,
	 * as their rq->engine pointer is not stable until under that
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */
	locked = READ_ONCE(rq->engine);
	spin_lock_irq(&locked->active.lock);
	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
		spin_unlock(&locked->active.lock);
		spin_lock(&engine->active.lock);
		locked = engine;
	}
	list_del_init(&rq->sched.link);
	spin_unlock_irq(&locked->active.lock);
}

bool i915_request_retire(struct i915_request *rq)
{
	if (!i915_request_completed(rq))
		return false;

	RQ_TRACE(rq, "\n");

	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
	trace_i915_request_retire(rq);

	/*
	 * We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	GEM_BUG_ON(!list_is_first(&rq->link,
				  &i915_request_timeline(rq)->requests));
	rq->ring->head = rq->postfix;

	/*
	 * We only loosely track inflight requests across preemption,
	 * and so we may find ourselves attempting to retire a _completed_
	 * request that we have removed from the HW and put back on a run
	 * queue.
	 */
	remove_from_engine(rq);

	spin_lock_irq(&rq->lock);
	i915_request_mark_complete(rq);
	if (!i915_request_signaled(rq))
		dma_fence_signal_locked(&rq->fence);
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
		i915_request_cancel_breadcrumb(rq);
	if (i915_request_has_waitboost(rq)) {
		GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
		atomic_dec(&rq->engine->gt->rps.num_waiters);
	}
	if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
		set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
		__notify_execute_cb(rq);
	}
	GEM_BUG_ON(!list_empty(&rq->execute_cb));
	spin_unlock_irq(&rq->lock);

	remove_from_client(rq);
	list_del(&rq->link);

	intel_context_exit(rq->context);
	intel_context_unpin(rq->context);

	free_capture_list(rq);
	i915_sched_node_fini(&rq->sched);
	i915_request_put(rq);

	return true;
}

void i915_request_retire_upto(struct i915_request *rq)
{
	struct intel_timeline * const tl = i915_request_timeline(rq);
	struct i915_request *tmp;

	RQ_TRACE(rq, "\n");

	GEM_BUG_ON(!i915_request_completed(rq));

	do {
		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
	} while (i915_request_retire(tmp) && tmp != rq);
}

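/*
 * Arrange for a callback when @signal starts executing: if the signaler is
 * already active the hook runs immediately, otherwise an execute_cb is
 * queued on the signaler and fired from irq_work upon submission.
 */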
static int
__await_execution(struct i915_request *rq,
		  struct i915_request *signal,
		  void (*hook)(struct i915_request *rq,
			       struct dma_fence *signal),
		  gfp_t gfp)
{
	struct execute_cb *cb;

	if (i915_request_is_active(signal)) {
		if (hook)
			hook(rq, &signal->fence);
		return 0;
	}

	cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
	if (!cb)
		return -ENOMEM;

	cb->fence = &rq->submit;
	i915_sw_fence_await(cb->fence);
	init_irq_work(&cb->work, irq_execute_cb);

	if (hook) {
		cb->hook = hook;
		cb->signal = i915_request_get(signal);
		cb->work.func = irq_execute_cb_hook;
	}

	spin_lock_irq(&signal->lock);
	if (i915_request_is_active(signal)) {
		if (hook) {
			hook(rq, &signal->fence);
			i915_request_put(signal);
		}
		i915_sw_fence_complete(cb->fence);
		kmem_cache_free(global.slab_execute_cbs, cb);
	} else {
		list_add_tail(&cb->link, &signal->execute_cb);
	}
	spin_unlock_irq(&signal->lock);

	/* Copy across semaphore status as we need the same behaviour */
	rq->sched.flags |= signal->sched.flags;
	return 0;
}

bool __i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	bool result = false;

	RQ_TRACE(request, "\n");

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->active.lock);

	/*
	 * With the advent of preempt-to-busy, we frequently encounter
	 * requests that we have unsubmitted from HW, but left running
	 * until the next ack and so have completed in the meantime. On
	 * resubmission of that completed request, we can skip
	 * updating the payload, and execlists can even skip submitting
	 * the request.
	 *
	 * We must remove the request from the caller's priority queue,
	 * and the caller must only call us when the request is in their
	 * priority queue, under the active.lock. This ensures that the
	 * request has *not* yet been retired and we can safely move
	 * the request into the engine->active.list where it will be
	 * dropped upon retiring. (Otherwise if resubmit a *retired*
	 * request, this would be a horrible use-after-free.)
	 */
	if (i915_request_completed(request))
		goto xfer;

	if (intel_context_is_banned(request->context))
		i915_request_skip(request, -EIO);

	/*
	 * Are we using semaphores when the gpu is already saturated?
	 *
	 * Using semaphores incurs a cost in having the GPU poll a
	 * memory location, busywaiting for it to change. The continual
	 * memory reads can have a noticeable impact on the rest of the
	 * system with the extra bus traffic, stalling the cpu as it too
	 * tries to access memory across the bus (perf stat -e bus-cycles).
	 *
	 * If we installed a semaphore on this request and we only submit
	 * the request after the signaler completed, that indicates the
	 * system is overloaded and using semaphores at this time only
	 * increases the amount of work we are doing. If so, we disable
	 * further use of semaphores until we are idle again, whence we
	 * optimistically try again.
	 */
	if (request->sched.semaphores &&
	    i915_sw_fence_signaled(&request->semaphore))
		engine->saturated |= request->sched.semaphores;

	engine->emit_fini_breadcrumb(request,
				     request->ring->vaddr + request->postfix);

	trace_i915_request_execute(request);
	result = true;

xfer:	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);

	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
		list_move_tail(&request->sched.link, &engine->active.requests);

	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
	    !i915_request_enable_breadcrumb(request))
		intel_engine_signal_breadcrumbs(engine);

	__notify_execute_cb(request);

	spin_unlock(&request->lock);

	return result;
}

void i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->active.lock, flags);

	__i915_request_submit(request);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

void __i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;

	RQ_TRACE(request, "\n");

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->active.lock);

	/*
	 * Only unwind in reverse order, required so that the per-context list
	 * is kept in seqno/ring order.
	 */

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);

	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		i915_request_cancel_breadcrumb(request);

	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
	clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);

	spin_unlock(&request->lock);

	/* We've already spun, don't charge on resubmitting. */
	if (request->sched.semaphores && i915_request_started(request)) {
		request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
		request->sched.semaphores = 0;
	}

	/*
	 * We don't need to wake_up any waiters on request->execute, they
	 * will get woken by any other event or us re-adding this request
	 * to the engine timeline (__i915_request_submit()). The waiters
	 * should be quite adept at finding that the request now has a new
	 * global_seqno to the one they went to sleep on.
	 */
}

void i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->active.lock, flags);

	__i915_request_unsubmit(request);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		trace_i915_request_submit(request);

		if (unlikely(fence->error))
			i915_request_skip(request, fence->error);

		/*
		 * We need to serialize use of the submit_request() callback
		 * with its hotplugging performed during an emergency
		 * i915_gem_set_wedged(). We use the RCU mechanism to mark the
		 * critical section in order to force i915_gem_set_wedged() to
		 * wait until the submit_request() is completed before
		 * proceeding.
		 */
		rcu_read_lock();
		request->engine->submit_request(request);
		rcu_read_unlock();
		break;

	case FENCE_FREE:
		i915_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *request =
		container_of(fence, typeof(*request), semaphore);

	switch (state) {
	case FENCE_COMPLETE:
		i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
		break;

	case FENCE_FREE:
		i915_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

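/* Retire completed requests on this timeline, stopping at the first still-active request. */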
static void retire_requests(struct intel_timeline *tl)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (!i915_request_retire(rq))
			break;
}

static noinline struct i915_request *
request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
{
	struct i915_request *rq;

	if (list_empty(&tl->requests))
		goto out;

	if (!gfpflags_allow_blocking(gfp))
		goto out;

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&tl->requests, typeof(*rq), link);
	i915_request_retire(rq);

	rq = kmem_cache_alloc(global.slab_requests,
			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (rq)
		return rq;

	/* Ratelimit ourselves to prevent oom from malicious clients */
	rq = list_last_entry(&tl->requests, typeof(*rq), link);
	cond_synchronize_rcu(rq->rcustate);

	/* Retire our old requests in the hope that we free some */
	retire_requests(tl);

out:
	return kmem_cache_alloc(global.slab_requests, gfp);
}

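/*
 * Slab constructor: initialise only the state that must survive reuse of a
 * request (the cache is SLAB_TYPESAFE_BY_RCU); per-use state is reset in
 * __i915_request_create() via the *_reinit() helpers.
 */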
static void __i915_request_ctor(void *arg)
{
	struct i915_request *rq = arg;

	spin_lock_init(&rq->lock);
	i915_sched_node_init(&rq->sched);
	i915_sw_fence_init(&rq->submit, submit_notify);
	i915_sw_fence_init(&rq->semaphore, semaphore_notify);

	rq->file_priv = NULL;
	rq->capture_list = NULL;

	INIT_LIST_HEAD(&rq->execute_cb);
}

struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
	struct intel_timeline *tl = ce->timeline;
	struct i915_request *rq;
	u32 seqno;
	int ret;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	/* Check that the caller provided an already pinned context */
	__intel_context_pin(ce);

	/*
	 * Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is the request we are writing to here, may be in the process
	 * of being read by __i915_active_request_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	rq = kmem_cache_alloc(global.slab_requests,
			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (unlikely(!rq)) {
		rq = request_alloc_slow(tl, gfp);
		if (!rq) {
			ret = -ENOMEM;
			goto err_unreserve;
		}
	}

	ret = intel_timeline_get_seqno(tl, rq, &seqno);
	if (ret)
		goto err_free;

	rq->i915 = ce->engine->i915;
	rq->context = ce;
	rq->engine = ce->engine;
	rq->ring = ce->ring;
	rq->execution_mask = ce->engine->mask;

	RCU_INIT_POINTER(rq->timeline, tl);
	RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
	rq->hwsp_seqno = tl->hwsp_seqno;

	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */

	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
		       tl->fence_context, seqno);

	/* We bump the ref for the fence chain */
	i915_sw_fence_reinit(&i915_request_get(rq)->submit);
	i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);

	i915_sched_node_reinit(&rq->sched);

	/* No zalloc, everything must be cleared after use */
	rq->batch = NULL;
	GEM_BUG_ON(rq->file_priv);
	GEM_BUG_ON(rq->capture_list);
	GEM_BUG_ON(!list_empty(&rq->execute_cb));

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_request_add() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 *
	 * Note that due to how we add reserved_space to intel_ring_begin()
	 * we need to double our request to ensure that if we need to wrap
	 * around inside i915_request_add() there is sufficient space at
	 * the beginning of the ring as well.
	 */
	rq->reserved_space =
		2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);

	/*
	 * Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	rq->head = rq->ring->emit;

	ret = rq->engine->request_alloc(rq);
	if (ret)
		goto err_unwind;

	rq->infix = rq->ring->emit; /* end of header; start of user payload */

	intel_context_mark_active(ce);

	return rq;

err_unwind:
	ce->ring->emit = rq->head;

	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));

err_free:
	kmem_cache_free(global.slab_requests, rq);
err_unreserve:
	intel_context_unpin(ce);
	return ERR_PTR(ret);
}

struct i915_request *
i915_request_create(struct intel_context *ce)
{
	struct i915_request *rq;
	struct intel_timeline *tl;

	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl))
		return ERR_CAST(tl);

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&tl->requests, typeof(*rq), link);
	if (!list_is_last(&rq->link, &tl->requests))
		i915_request_retire(rq);

	intel_context_enter(ce);
	rq = __i915_request_create(ce, GFP_KERNEL);
	intel_context_exit(ce); /* active reference transferred to request */
	if (IS_ERR(rq))
		goto err_unlock;

	/* Check that we do not interrupt ourselves with a new request */
	rq->cookie = lockdep_pin_lock(&tl->mutex);

	return rq;

err_unlock:
	intel_context_timeline_unlock(tl);
	return rq;
}

static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
	struct dma_fence *fence;
	int err;

	GEM_BUG_ON(i915_request_timeline(rq) ==
		   rcu_access_pointer(signal->timeline));

	fence = NULL;
	rcu_read_lock();
	spin_lock_irq(&signal->lock);
	if (!i915_request_started(signal) &&
	    !list_is_first(&signal->link,
			   &rcu_dereference(signal->timeline)->requests)) {
		struct i915_request *prev = list_prev_entry(signal, link);

		/*
		 * Peek at the request before us in the timeline. That
		 * request will only be valid before it is retired, so
		 * after acquiring a reference to it, confirm that it is
		 * still part of the signaler's timeline.
		 */
		if (i915_request_get_rcu(prev)) {
			if (list_next_entry(prev, link) == signal)
				fence = &prev->fence;
			else
				i915_request_put(prev);
		}
	}
	spin_unlock_irq(&signal->lock);
	rcu_read_unlock();
	if (!fence)
		return 0;

	err = 0;
	if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
		err = i915_sw_fence_await_dma_fence(&rq->submit,
						    fence, 0,
						    I915_FENCE_GFP);
	dma_fence_put(fence);

	return err;
}

static intel_engine_mask_t
already_busywaiting(struct i915_request *rq)
{
	/*
	 * Polling a semaphore causes bus traffic, delaying other users of
	 * both the GPU and CPU. We want to limit the impact on others,
	 * while taking advantage of early submission to reduce GPU
	 * latency. Therefore we restrict ourselves to not using more
	 * than one semaphore from each source, and not using a semaphore
	 * if we have detected the engine is saturated (i.e. would not be
	 * submitted early and cause bus traffic reading an already passed
	 * seqno).
	 *
	 * See the are-we-too-late? check in __i915_request_submit().
	 */
	return rq->sched.semaphores | rq->engine->saturated;
}

static int
__emit_semaphore_wait(struct i915_request *to,
		      struct i915_request *from,
		      u32 seqno)
{
	const int has_token = INTEL_GEN(to->i915) >= 12;
	u32 hwsp_offset;
	int len, err;
	u32 *cs;

	GEM_BUG_ON(INTEL_GEN(to->i915) < 8);

	/* We need to pin the signaler's HWSP until we are finished reading. */
	err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
	if (err)
		return err;

	len = 4;
	if (has_token)
		len += 2;

	cs = intel_ring_begin(to, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Using greater-than-or-equal here means we have to worry
	 * about seqno wraparound. To side step that issue, we swap
	 * the timeline HWSP upon wrapping, so that everyone listening
	 * for the old (pre-wrap) values do not see the much smaller
	 * (post-wrap) values than they were expecting (and so wait
	 * forever).
	 */
	*cs++ = (MI_SEMAPHORE_WAIT |
		 MI_SEMAPHORE_GLOBAL_GTT |
		 MI_SEMAPHORE_POLL |
		 MI_SEMAPHORE_SAD_GTE_SDD) +
		has_token;
	*cs++ = seqno;
	*cs++ = hwsp_offset;
	*cs++ = 0;
	if (has_token) {
		*cs++ = 0;
		*cs++ = MI_NOOP;
	}

	intel_ring_advance(to, cs);
	return 0;
}

static int
emit_semaphore_wait(struct i915_request *to,
		    struct i915_request *from,
		    gfp_t gfp)
{
	/* Just emit the first semaphore we see as request space is limited. */
	if (already_busywaiting(to) & from->engine->mask)
		goto await_fence;

	if (i915_request_await_start(to, from) < 0)
		goto await_fence;

	/* Only submit our spinner after the signaler is running! */
	if (__await_execution(to, from, NULL, gfp))
		goto await_fence;

	if (__emit_semaphore_wait(to, from, from->fence.seqno))
		goto await_fence;

	to->sched.semaphores |= from->engine->mask;
	to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
	return 0;

await_fence:
	return i915_sw_fence_await_dma_fence(&to->submit,
					     &from->fence, 0,
					     I915_FENCE_GFP);
}

static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);
	GEM_BUG_ON(to->timeline == from->timeline);

	if (i915_request_completed(from))
		return 0;

	if (to->engine->schedule) {
		ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
		if (ret < 0)
			return ret;
	}

	if (to->engine == from->engine)
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       I915_FENCE_GFP);
	else if (intel_context_use_semaphores(to->context))
		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
	else
		ret = i915_sw_fence_await_dma_fence(&to->submit,
						    &from->fence, 0,
						    I915_FENCE_GFP);
	if (ret < 0)
		return ret;

	if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
		ret = i915_sw_fence_await_dma_fence(&to->semaphore,
						    &from->fence, 0,
						    I915_FENCE_GFP);
		if (ret < 0)
			return ret;
	}

	return 0;
}

int
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	/*
	 * Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			i915_sw_fence_set_error_once(&rq->submit, fence->error);
			continue;
		}

		/*
		 * Requests on the same timeline are explicitly ordered, along
		 * with their dependencies, by i915_request_add() which ensures
		 * that requests are submitted in-order through each ring.
		 */
		if (fence->context == rq->fence.context)
			continue;

		/* Squash repeated waits to the same timelines */
		if (fence->context &&
		    intel_timeline_sync_is_later(i915_request_timeline(rq),
						 fence))
			continue;

		if (dma_fence_is_i915(fence))
			ret = i915_request_await_request(rq, to_request(fence));
		else
			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
							    fence->context ? I915_FENCE_TIMEOUT : 0,
							    I915_FENCE_GFP);
		if (ret < 0)
			return ret;

		/* Record the latest fence used against each timeline */
		if (fence->context)
			intel_timeline_sync_set(i915_request_timeline(rq),
						fence);
	} while (--nchild);

	return 0;
}

static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
					  struct dma_fence *fence)
{
	return __intel_timeline_sync_is_later(tl,
					      fence->context,
					      fence->seqno - 1);
}

static int intel_timeline_sync_set_start(struct intel_timeline *tl,
					 const struct dma_fence *fence)
{
	return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
}

static int
__i915_request_await_execution(struct i915_request *to,
			       struct i915_request *from,
			       void (*hook)(struct i915_request *rq,
					    struct dma_fence *signal))
{
	int err;

	/* Submit both requests at the same time */
	err = __await_execution(to, from, hook, I915_FENCE_GFP);
	if (err)
		return err;

	/* Squash repeated dependencies to the same timelines */
	if (intel_timeline_sync_has_start(i915_request_timeline(to),
					  &from->fence))
		return 0;

	/* Ensure both start together [after all semaphores in signal] */
	if (intel_engine_has_semaphores(to->engine))
		err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
	else
		err = i915_request_await_start(to, from);
	if (err < 0)
		return err;

	/* Couple the dependency tree for PI on this exposed to->fence */
	if (to->engine->schedule) {
		err = i915_sched_node_add_dependency(&to->sched, &from->sched);
		if (err < 0)
			return err;
	}

	return intel_timeline_sync_set_start(i915_request_timeline(to),
					     &from->fence);
}

int
i915_request_await_execution(struct i915_request *rq,
			     struct dma_fence *fence,
			     void (*hook)(struct i915_request *rq,
					  struct dma_fence *signal))
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		/* XXX Error for signal-on-any fence arrays */

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			i915_sw_fence_set_error_once(&rq->submit, fence->error);
			continue;
		}

		/*
		 * We don't squash repeated fence dependencies here as we
		 * want to run our callback in all cases.
		 */
		if (dma_fence_is_i915(fence))
			ret = __i915_request_await_execution(rq,
							     to_request(fence),
							     hook);
		else
			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
							    I915_FENCE_TIMEOUT,
							    GFP_KERNEL);
		if (ret < 0)
			return ret;
	} while (--nchild);

	return 0;
}

/**
 * i915_request_await_object - set this request to (async) wait upon a bo
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_request_await_object(struct i915_request *to,
			  struct drm_i915_gem_object *obj,
			  bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = dma_resv_get_fences_rcu(obj->base.resv,
					      &excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);

		kfree(shared);
	} else {
		excl = dma_resv_get_excl_rcu(obj->base.resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}

void i915_request_skip(struct i915_request *rq, int error)
{
	void *vaddr = rq->ring->vaddr;
	u32 head;

	GEM_BUG_ON(!IS_ERR_VALUE((long)error));
	dma_fence_set_error(&rq->fence, error);

	if (rq->infix == rq->postfix)
		return;

	/*
	 * As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	head = rq->infix;
	if (rq->postfix < head) {
		memset(vaddr + head, 0, rq->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, 0, rq->postfix - head);
	rq->infix = rq->postfix;
}

static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
	struct intel_timeline *timeline = i915_request_timeline(rq);
	struct i915_request *prev;

	/*
	 * Dependency tracking and request ordering along the timeline
	 * is special cased so that we can eliminate redundant ordering
	 * operations while building the request (we know that the timeline
	 * itself is ordered, and here we guarantee it).
	 *
	 * As we know we will need to emit tracking along the timeline,
	 * we embed the hooks into our request struct -- at the cost of
	 * having to have specialised no-allocation interfaces (which will
	 * be beneficial elsewhere).
	 *
	 * A second benefit to open-coding i915_request_await_request is
	 * that we can apply a slight variant of the rules specialised
	 * for timelines that jump between engines (such as virtual engines).
	 * If we consider the case of virtual engine, we must emit a dma-fence
	 * to prevent scheduling of the second request until the first is
	 * complete (to maximise our greedy late load balancing) and this
	 * precludes optimising to use semaphores serialisation of a single
	 * timeline across engines.
	 */
	prev = to_request(__i915_active_fence_set(&timeline->last_request,
						  &rq->fence));
	if (prev && !i915_request_completed(prev)) {
		if (is_power_of_2(prev->engine->mask | rq->engine->mask))
			i915_sw_fence_await_sw_fence(&rq->submit,
						     &prev->submit,
						     &rq->submitq);
		else
			__i915_sw_fence_await_dma_fence(&rq->submit,
							&prev->fence,
							&rq->dmaq);
		if (rq->engine->schedule)
			__i915_sched_node_add_dependency(&rq->sched,
							 &prev->sched,
							 &rq->dep,
							 0);
	}

	list_add_tail(&rq->link, &timeline->requests);

	/*
	 * Make sure that no request gazumped us - if it was allocated after
	 * our i915_request_alloc() and called __i915_request_add() before
	 * us, the timeline will hold its seqno which is later than ours.
	 */
	GEM_BUG_ON(timeline->seqno != rq->fence.seqno);

	return prev;
}

/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
struct i915_request *__i915_request_commit(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_ring *ring = rq->ring;
	u32 *cs;

	RQ_TRACE(rq, "\n");

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	GEM_BUG_ON(rq->reserved_space > ring->space);
	rq->reserved_space = 0;
	rq->emitted_jiffies = jiffies;

	/*
	 * Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
	GEM_BUG_ON(IS_ERR(cs));
	rq->postfix = intel_ring_offset(rq, cs);

	return __i915_request_add_to_timeline(rq);
}

void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr)
{
	/*
	 * Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
	i915_sw_fence_commit(&rq->semaphore);
	if (attr && rq->engine->schedule)
		rq->engine->schedule(rq, attr);
	i915_sw_fence_commit(&rq->submit);
}

void i915_request_add(struct i915_request *rq)
{
	struct intel_timeline * const tl = i915_request_timeline(rq);
	struct i915_sched_attr attr = {};
	struct i915_request *prev;

	lockdep_assert_held(&tl->mutex);
	lockdep_unpin_lock(&tl->mutex, rq->cookie);

	trace_i915_request_add(rq);

	prev = __i915_request_commit(rq);

	if (rcu_access_pointer(rq->context->gem_context))
		attr = i915_request_gem_context(rq)->sched;

	/*
	 * Boost actual workloads past semaphores!
	 *
	 * With semaphores we spin on one engine waiting for another,
	 * simply to reduce the latency of starting our work when
	 * the signaler completes. However, if there is any other
	 * work that we could be doing on this engine instead, that
	 * is better utilisation and will reduce the overall duration
	 * of the current work. To avoid PI boosting a semaphore
	 * far in the distance past over useful work, we keep a history
	 * of any semaphore use along our dependency chain.
	 */
	if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
		attr.priority |= I915_PRIORITY_NOSEMAPHORE;

	/*
	 * Boost priorities to new clients (new request flows).
	 *
	 * Allow interactive/synchronous clients to jump ahead of
	 * the bulk clients. (FQ_CODEL)
	 */
	if (list_empty(&rq->sched.signalers_list))
		attr.priority |= I915_PRIORITY_WAIT;

	local_bh_disable();
	__i915_request_queue(rq, &attr);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */

	/*
	 * In typical scenarios, we do not expect the previous request on
	 * the timeline to be still tracked by timeline->last_request if it
	 * has been completed. If the completed request is still here, that
	 * implies that request retirement is a long way behind submission,
	 * suggesting that we haven't been retiring frequently enough from
	 * the combination of retire-before-alloc, waiters and the background
	 * retirement worker. So if the last request on this timeline was
	 * already completed, do a catch up pass, flushing the retirement queue
	 * up to this client. Since we have now moved the heaviest operations
	 * during retirement onto secondary workers, such as freeing objects
	 * or contexts, retiring a bunch of requests is mostly list management
	 * (and cache misses), and so we should not be overly penalizing this
	 * client by performing excess work, though we may still be performing
	 * work on behalf of others -- but instead we should benefit from
	 * improved resource management. (Well, that's the theory at least.)
	 */
	if (prev &&
	    i915_request_completed(prev) &&
	    rcu_access_pointer(prev->timeline) == tl)
		i915_request_retire_upto(prev);

	mutex_unlock(&tl->mutex);
}

static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/*
	 * Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt to the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

static bool __i915_spin_request(const struct i915_request * const rq,
				int state, unsigned long timeout_us)
{
	unsigned int cpu;

	/*
	 * Only wait for the request if we know it is likely to complete.
	 *
	 * We don't track the timestamps around requests, nor the average
	 * request length, so we do not have a good indicator that this
	 * request will complete within the timeout. What we do know is the
	 * order in which requests are executed by the context and so we can
	 * tell if the request has been started. If the request is not even
	 * running yet, it is a fair assumption that it will not complete
	 * within our relatively short timeout.
	 */
	if (!i915_request_is_running(rq))
		return false;

	/*
	 * When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quick as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */
	timeout_us += local_clock_us(&cpu);
	do {
		if (i915_request_completed(rq))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax();
	} while (!need_resched());

	return false;
}

struct request_wait {
	struct dma_fence_cb cb;
	struct task_struct *tsk;
};

static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct request_wait *wait = container_of(cb, typeof(*wait), cb);

	wake_up_process(wait->tsk);
}

/**
 * i915_request_wait - wait until execution of request has finished
 * @rq: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_request_wait() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	struct request_wait wait;

	might_sleep();
	GEM_BUG_ON(timeout < 0);

	if (dma_fence_is_signaled(&rq->fence))
		return timeout;

	if (!timeout)
		return -ETIME;

	trace_i915_request_wait_begin(rq, flags);

	/*
	 * We must never wait on the GPU while holding a lock as we
	 * may need to perform a GPU reset. So while we don't need to
	 * serialise wait/reset with an explicit lock, we do want
	 * lockdep to detect potential dependency cycles.
	 */
	mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);

	/*
	 * Optimistic spin before touching IRQs.
	 *
	 * We may use a rather large value here to offset the penalty of
	 * switching away from the active task. Frequently, the client will
	 * wait upon an old swapbuffer to throttle itself to remain within a
	 * frame of the gpu. If the client is running in lockstep with the gpu,
	 * then it should not be waiting long at all, and a sleep now will incur
	 * extra scheduler latency in producing the next frame. To try to
	 * avoid adding the cost of enabling/disabling the interrupt to the
	 * short wait, we first spin to see if the request would have completed
	 * in the time taken to setup the interrupt.
	 *
	 * We need upto 5us to enable the irq, and upto 20us to hide the
	 * scheduler latency of a context switch, ignoring the secondary
	 * impacts from a context switch such as cache eviction.
	 *
	 * The scheme used for low-latency IO is called "hybrid interrupt
	 * polling". The suggestion there is to sleep until just before you
	 * expect to be woken by the device interrupt and then poll for its
	 * completion. That requires having a good predictor for the request
	 * duration, which we currently lack.
	 */
	if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) &&
	    __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
		dma_fence_signal(&rq->fence);
		goto out;
	}

	/*
	 * This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we sleep. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery).
	 */
	if (flags & I915_WAIT_PRIORITY) {
		if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
			intel_rps_boost(rq);
		i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
	}

	wait.tsk = current;
	if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
		goto out;

	for (;;) {
		set_current_state(state);

		if (i915_request_completed(rq)) {
			dma_fence_signal(&rq->fence);
			break;
		}

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		intel_engine_flush_submission(rq->engine);
		timeout = io_schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	dma_fence_remove_callback(&rq->fence, &wait.cb);

out:
	mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
	trace_i915_request_wait_end(rq);
	return timeout;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
#endif

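/*
 * The slab caches below are registered with i915_globals (see global.base)
 * so they can be shrunk under memory pressure and destroyed on module exit.
 */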
static void i915_global_request_shrink(void)
{
	kmem_cache_shrink(global.slab_dependencies);
	kmem_cache_shrink(global.slab_execute_cbs);
	kmem_cache_shrink(global.slab_requests);
}

static void i915_global_request_exit(void)
{
	kmem_cache_destroy(global.slab_dependencies);
	kmem_cache_destroy(global.slab_execute_cbs);
	kmem_cache_destroy(global.slab_requests);
}

static struct i915_global_request global = { {
	.shrink = i915_global_request_shrink,
	.exit = i915_global_request_exit,
} };

int __init i915_global_request_init(void)
{
	global.slab_requests =
		kmem_cache_create("i915_request",
				  sizeof(struct i915_request),
				  __alignof__(struct i915_request),
				  SLAB_HWCACHE_ALIGN |
				  SLAB_RECLAIM_ACCOUNT |
				  SLAB_TYPESAFE_BY_RCU,
				  __i915_request_ctor);
	if (!global.slab_requests)
		return -ENOMEM;

	global.slab_execute_cbs = KMEM_CACHE(execute_cb,
					     SLAB_HWCACHE_ALIGN |
					     SLAB_RECLAIM_ACCOUNT |
					     SLAB_TYPESAFE_BY_RCU);
	if (!global.slab_execute_cbs)
		goto err_requests;

	global.slab_dependencies = KMEM_CACHE(i915_dependency,
					      SLAB_HWCACHE_ALIGN |
					      SLAB_RECLAIM_ACCOUNT);
	if (!global.slab_dependencies)
		goto err_execute_cbs;

	i915_global_register(&global.base);
	return 0;

err_execute_cbs:
	kmem_cache_destroy(global.slab_execute_cbs);
err_requests:
	kmem_cache_destroy(global.slab_requests);
	return -ENOMEM;
}