/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
static struct i915_global_scheduler {
	struct i915_global base;
	struct kmem_cache *slab_dependencies;
	struct kmem_cache *slab_priorities;
} global;
static DEFINE_SPINLOCK(schedule_lock);
static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
	return container_of(node, const struct i915_request, sched);
}
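/*
 * A sketch of the pointer arithmetic container_of() performs here,
 * assuming only that struct i915_request embeds its scheduling node as
 * the "sched" member:
 *
 *	(const struct i915_request *)
 *		((const char *)node - offsetof(struct i915_request, sched));
 */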
static inline bool node_started(const struct i915_sched_node *node)
{
	return i915_request_started(node_to_request(node));
}
static inline bool node_signaled(const struct i915_sched_node *node)
{
	return i915_request_completed(node_to_request(node));
}
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
	struct rb_node *rb;
	long last_prio, i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
		   rb_first(&execlists->queue.rb_root));

	last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		const struct i915_priolist *p = to_priolist(rb);

		GEM_BUG_ON(p->priority >= last_prio);
		last_prio = p->priority;

		GEM_BUG_ON(!p->used);
		for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
			if (list_empty(&p->requests[i]))
				continue;

			GEM_BUG_ON(!(p->used & BIT(i)));
		}
	}
}
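/*
 * An illustrative well-formed queue (the values are only an example):
 * priority levels strictly descending from rb_first_cached(),
 *
 *	[prio 1] -> [prio 0] -> [prio -1]
 *
 * where a level whose only non-empty buckets are requests[0] and
 * requests[2] must have used == BIT(0) | BIT(2).
 */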
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_priolist *p;
	struct rb_node **parent, *rb;
	bool first = true;
	int idx, i;

	lockdep_assert_held(&engine->active.lock);
	assert_priolists(execlists);

	/* buckets sorted from highest [in slot 0] to lowest priority */
	idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
	prio >>= I915_USER_PRIORITY_SHIFT;
	if (unlikely(execlists->no_priolist))
		prio = I915_PRIORITY_NORMAL;

find_priolist:
	/* most positive priority is scheduled first, equal priorities fifo */
	rb = NULL;
	parent = &execlists->queue.rb_root.rb_node;
	while (*parent) {
		rb = *parent;
		p = to_priolist(rb);
		if (prio > p->priority) {
			parent = &rb->rb_left;
		} else if (prio < p->priority) {
			parent = &rb->rb_right;
			first = false;
		} else {
			goto out;
		}
	}

	if (prio == I915_PRIORITY_NORMAL) {
		p = &execlists->default_priolist;
	} else {
		p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
		/* Convert an allocation failure to a priority bump */
		if (unlikely(!p)) {
			prio = I915_PRIORITY_NORMAL; /* recurses just once */

			/* To maintain ordering with all rendering, after an
			 * allocation failure we have to disable all scheduling.
			 * Requests will then be executed in fifo, and schedule
			 * will ensure that dependencies are emitted in fifo.
			 * There will be still some reordering with existing
			 * requests, so if userspace lied about their
			 * dependencies that reordering may be visible.
			 */
			execlists->no_priolist = true;
			goto find_priolist;
		}
	}

	p->priority = prio;
	for (i = 0; i < ARRAY_SIZE(p->requests); i++)
		INIT_LIST_HEAD(&p->requests[i]);
	rb_link_node(&p->node, rb, parent);
	rb_insert_color_cached(&p->node, &execlists->queue, first);
	p->used = 0;

out:
	p->used |= BIT(idx);
	return &p->requests[idx];
}
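/*
 * A worked example of the split above, assuming for illustration that
 * I915_USER_PRIORITY_SHIFT == 2 (so I915_PRIORITY_COUNT == 4 fifo
 * buckets per level; the real values live in i915_priolist_types.h):
 *
 *	prio = I915_USER_PRIORITY(1) | BIT(1);	// user level 1 + a bump
 *	idx  = 4 - (prio & 3) - 1;		// == 1, second bucket
 *	prio >>= 2;				// == 1, the rbtree key
 *
 * i.e. one rbtree node per user priority level, with the low internal
 * bits selecting the fifo bucket within that node, slot 0 draining
 * first.
 */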
void __i915_priolist_free(struct i915_priolist *p)
{
	kmem_cache_free(global.slab_priorities, p);
}
struct sched_cache {
	struct list_head *priolist;
};
static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
		  struct intel_engine_cs *locked,
		  struct sched_cache *cache)
{
	const struct i915_request *rq = node_to_request(node);
	struct intel_engine_cs *engine;

	GEM_BUG_ON(!locked);

	/*
	 * Virtual engines complicate acquiring the engine timeline lock,
	 * as their rq->engine pointer is not stable until under that
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */
	while (locked != (engine = READ_ONCE(rq->engine))) {
		spin_unlock(&locked->active.lock);
		/* The cached priolist belongs to the engine just unlocked */
		memset(cache, 0, sizeof(*cache));
		spin_lock(&engine->active.lock);
		locked = engine;
	}

	GEM_BUG_ON(locked != engine);
	return locked;
}
static inline int rq_prio(const struct i915_request *rq)
{
	return rq->sched.attr.priority;
}
static inline bool need_preempt(int prio, int active)
{
	/*
	 * Allow preemption of low -> normal -> high, but we do
	 * not allow low priority tasks to preempt other low priority
	 * tasks under the impression that latency for low priority
	 * tasks does not matter (as much as background throughput),
	 * so kiss.
	 */
	return prio >= max(I915_PRIORITY_NORMAL, active);
}
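/*
 * A few example evaluations (with I915_PRIORITY_NORMAL == 0): a prio 1
 * request preempts a prio 0 workload (1 >= max(0, 0)); a prio -1
 * request never preempts a prio -2 workload (-1 < max(0, -2)); and a
 * prio 2 request does not preempt a prio 5 workload (2 < max(0, 5)).
 */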
static void kick_submission(struct intel_engine_cs *engine,
			    const struct i915_request *rq,
			    int prio)
{
	const struct i915_request *inflight;

	/*
	 * We only need to kick the tasklet once for the high priority
	 * new context we add into the queue.
	 */
	if (prio <= engine->execlists.queue_priority_hint)
		return;

	rcu_read_lock();

	/* Nothing currently active? We're overdue for a submission! */
	inflight = execlists_active(&engine->execlists);
	if (!inflight)
		goto unlock;

	/*
	 * If we are already the currently executing context, don't
	 * bother evaluating if we should preempt ourselves.
	 */
	if (inflight->context == rq->context)
		goto unlock;

	ENGINE_TRACE(engine,
		     "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
		     prio,
		     rq->fence.context, rq->fence.seqno,
		     inflight->fence.context, inflight->fence.seqno,
		     inflight->sched.attr.priority);

	engine->execlists.queue_priority_hint = prio;
	if (need_preempt(prio, rq_prio(inflight)))
		tasklet_hi_schedule(&engine->execlists.tasklet);

unlock:
	rcu_read_unlock();
}
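/*
 * An example timeline: with queue_priority_hint == 0 and a prio 0
 * context inflight, queueing a prio 2 request raises the hint to 2 and,
 * since need_preempt(2, 0) is true, schedules the tasklet to
 * re-evaluate the ELSP submission ports.
 */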
static void __i915_schedule(struct i915_sched_node *node,
			    const struct i915_sched_attr *attr)
{
	const int prio = max(attr->priority, node->attr.priority);
	struct intel_engine_cs *engine;
	struct i915_dependency *dep, *p;
	struct i915_dependency stack;
	struct sched_cache cache;
	LIST_HEAD(dfs);

	/* Needed in order to use the temporary link inside i915_dependency */
	lockdep_assert_held(&schedule_lock);
	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

	if (node_signaled(node))
		return;

	stack.signaler = node;
	list_add(&stack.dfs_link, &dfs);

	/*
	 * Recursively bump all dependent priorities to match the new request.
	 *
	 * A naive approach would be to use recursion:
	 * static void update_priorities(struct i915_sched_node *node, prio) {
	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
	 *		update_priorities(dep->signal, prio)
	 *	queue_request(node);
	 * }
	 * but that may have unlimited recursion depth and so runs a very
	 * real risk of overrunning the kernel stack. Instead, we build
	 * a flat list of all dependencies starting with the current request.
	 * As we walk the list of dependencies, we add all of its dependencies
	 * to the end of the list (this may include an already visited
	 * request) and continue to walk onwards onto the new dependencies. The
	 * end result is a topological list of requests in reverse order, the
	 * last element in the list is the request we must execute first.
	 */
	list_for_each_entry(dep, &dfs, dfs_link) {
		struct i915_sched_node *node = dep->signaler;

		/* If we are already flying, we know we have no signalers */
		if (node_started(node))
			continue;

		/*
		 * Within an engine, there can be no cycle, but we may
		 * refer to the same dependency chain multiple times
		 * (redundant dependencies are not eliminated) and across
		 * engines.
		 */
		list_for_each_entry(p, &node->signalers_list, signal_link) {
			GEM_BUG_ON(p == dep); /* no cycles! */

			if (node_signaled(p->signaler))
				continue;

			if (prio > READ_ONCE(p->signaler->attr.priority))
				list_move_tail(&p->dfs_link, &dfs);
		}
	}

	/*
	 * If we didn't need to bump any existing priorities, and we haven't
	 * yet submitted this request (i.e. there is no potential race with
	 * execlists_submit_request()), we can set our own priority and skip
	 * acquiring the engine locks.
	 */
	if (node->attr.priority == I915_PRIORITY_INVALID) {
		GEM_BUG_ON(!list_empty(&node->link));
		node->attr = *attr;

		if (stack.dfs_link.next == stack.dfs_link.prev)
			return;

		__list_del_entry(&stack.dfs_link);
	}

	memset(&cache, 0, sizeof(cache));
	engine = node_to_request(node)->engine;
	spin_lock(&engine->active.lock);

	/* Fifo and depth-first replacement ensure our deps execute before us */
	engine = sched_lock_engine(node, engine, &cache);
	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
		INIT_LIST_HEAD(&dep->dfs_link);

		node = dep->signaler;
		engine = sched_lock_engine(node, engine, &cache);
		lockdep_assert_held(&engine->active.lock);

		/* Recheck after acquiring the engine->timeline.lock */
		if (prio <= node->attr.priority || node_signaled(node))
			continue;

		GEM_BUG_ON(node_to_request(node)->engine != engine);

		WRITE_ONCE(node->attr.priority, prio);

		/*
		 * Once the request is ready, it will be placed into the
		 * priority lists and then onto the HW runlist. Before the
		 * request is ready, it does not contribute to our preemption
		 * decisions and we can safely ignore it, as it will, and
		 * any preemption required, be dealt with upon submission.
		 * See engine->submit_request()
		 */
		if (list_empty(&node->link))
			continue;

		if (i915_request_in_priority_queue(node_to_request(node))) {
			if (!cache.priolist)
				cache.priolist =
					i915_sched_lookup_priolist(engine,
								   prio);
			list_move_tail(&node->link, cache.priolist);
		}

		/* Defer (tasklet) submission until after all of our updates. */
		kick_submission(engine, node_to_request(node), prio);
	}

	spin_unlock(&engine->active.lock);
}
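/*
 * A small worked example of the flattening above: with A waiting on B
 * and B waiting on C, bumping A seeds dfs = { A }; visiting A appends
 * its dependency on B, visiting B appends the dependency on C, giving
 * { A, B, C }. The reverse walk then raises C first, then B, then A,
 * so a request is never bumped ahead of the requests it waits upon.
 */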
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
	spin_lock_irq(&schedule_lock);
	__i915_schedule(&rq->sched, attr);
	spin_unlock_irq(&schedule_lock);
}
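/*
 * A sketch of typical use from a boost path (illustrative; the display
 * boost behind i915_gem_object_wait_priority() is one such caller):
 *
 *	struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
 *
 *	i915_schedule(rq, &attr);
 */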
static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
{
	struct i915_sched_attr attr = node->attr;

	if (attr.priority & bump)
		return;

	attr.priority |= bump;
	__i915_schedule(node, &attr);
}
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
	unsigned long flags;

	GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
	if (READ_ONCE(rq->sched.attr.priority) & bump)
		return;

	spin_lock_irqsave(&schedule_lock, flags);
	__bump_priority(&rq->sched, bump);
	spin_unlock_irqrestore(&schedule_lock, flags);
}
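/*
 * Note the asymmetry with i915_schedule(): a bump is one of the
 * internal bits below I915_USER_PRIORITY_SHIFT (hence the GEM_BUG_ON),
 * a small nudge that breaks ties within a user priority level, e.g.
 *
 *	i915_schedule_bump_priority(rq, BIT(0));	// an internal flag
 *
 * and the unlocked READ_ONCE check keeps the common already-bumped
 * case free of the global schedule_lock.
 */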
void i915_sched_node_init(struct i915_sched_node *node)
{
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);

	i915_sched_node_reinit(node);
}
void i915_sched_node_reinit(struct i915_sched_node *node)
{
	node->attr.priority = I915_PRIORITY_INVALID;
	node->semaphores = 0;
	node->flags = 0;

	GEM_BUG_ON(!list_empty(&node->signalers_list));
	GEM_BUG_ON(!list_empty(&node->waiters_list));
	GEM_BUG_ON(!list_empty(&node->link));
}
static struct i915_dependency *
i915_dependency_alloc(void)
{
	return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}
static void
i915_dependency_free(struct i915_dependency *dep)
{
	kmem_cache_free(global.slab_dependencies, dep);
}
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
				      struct i915_sched_node *signal,
				      struct i915_dependency *dep,
				      unsigned long flags)
{
	bool ret = false;

	spin_lock_irq(&schedule_lock);

	if (!node_signaled(signal)) {
		INIT_LIST_HEAD(&dep->dfs_link);
		dep->signaler = signal;
		dep->waiter = node;
		dep->flags = flags;

		/* All set, now publish. Beware the lockless walkers. */
		list_add_rcu(&dep->signal_link, &node->signalers_list);
		list_add_rcu(&dep->wait_link, &signal->waiters_list);

		/* Propagate the chains */
		node->flags |= signal->flags;
		ret = true;
	}

	spin_unlock_irq(&schedule_lock);

	return ret;
}
int i915_sched_node_add_dependency(struct i915_sched_node *node,
				   struct i915_sched_node *signal,
				   unsigned long flags)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc();
	if (!dep)
		return -ENOMEM;

	local_bh_disable();

	if (!__i915_sched_node_add_dependency(node, signal, dep,
					      flags | I915_DEPENDENCY_ALLOC))
		i915_dependency_free(dep);

	local_bh_enable(); /* kick submission tasklet */

	return 0;
}
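/*
 * A sketch of the intended call pattern, e.g. while constructing a
 * request that must wait upon a prior one (the flags of 0 here are
 * purely illustrative; callers may pass I915_DEPENDENCY_* hints):
 *
 *	err = i915_sched_node_add_dependency(&rq->sched, &prev->sched, 0);
 *	if (err)
 *		return err;
 */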
void i915_sched_node_fini(struct i915_sched_node *node)
{
	struct i915_dependency *dep, *tmp;

	spin_lock_irq(&schedule_lock);

	/*
	 * Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del_rcu(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}
	INIT_LIST_HEAD(&node->signalers_list);

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
		GEM_BUG_ON(dep->signaler != node);
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del_rcu(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}
	INIT_LIST_HEAD(&node->waiters_list);

	spin_unlock_irq(&schedule_lock);
}
static void i915_global_scheduler_shrink(void)
{
	kmem_cache_shrink(global.slab_dependencies);
	kmem_cache_shrink(global.slab_priorities);
}
static void i915_global_scheduler_exit(void)
{
	kmem_cache_destroy(global.slab_dependencies);
	kmem_cache_destroy(global.slab_priorities);
}
static struct i915_global_scheduler global = { {
	.shrink = i915_global_scheduler_shrink,
	.exit = i915_global_scheduler_exit,
} };
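/*
 * KMEM_CACHE() below names each cache after its struct and sizes it
 * automatically; SLAB_TYPESAFE_BY_RCU keeps freed dependencies typed
 * (though possibly recycled) across an RCU grace period, which is what
 * permits the lockless list walkers that the publish above warns about.
 */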
int __init i915_global_scheduler_init(void)
{
	global.slab_dependencies = KMEM_CACHE(i915_dependency,
					      SLAB_HWCACHE_ALIGN |
					      SLAB_TYPESAFE_BY_RCU);
	if (!global.slab_dependencies)
		return -ENOMEM;

	global.slab_priorities = KMEM_CACHE(i915_priolist,
					    SLAB_HWCACHE_ALIGN);
	if (!global.slab_priorities)
		goto err_priorities;

	i915_global_register(&global.base);
	return 0;

err_priorities:
	kmem_cache_destroy(global.slab_dependencies);
	return -ENOMEM;
}