/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects the
 * entities from the run queue using a FIFO. The scheduler provides dependency
 * handling features among jobs. The driver is supposed to provide callback
 * functions for backend operations to the scheduler, like submitting a job to
 * the hardware run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 */
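
/*
 * Illustrative only (not part of the original file): a rough sketch of the
 * driver-side flow this scheduler expects. my_ring, my_entity, my_job and
 * my_ops are hypothetical driver objects; the drm_sched_* calls are the API
 * implemented in this file and in sched_entity.c, see <drm/gpu_scheduler.h>
 * for the exact signatures.
 *
 *	drm_sched_init(&my_ring->sched, &my_ops, hw_submission, hang_limit,
 *		       timeout, "my-ring");
 *	drm_sched_entity_init(&my_entity, ...);
 *
 *	drm_sched_job_init(&my_job->base, &my_entity, owner);
 *	drm_sched_entity_push_job(&my_job->base, &my_entity);
 */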
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>

#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"
#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance this run queue belongs to
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
	rq->sched = sched;
}
/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}
/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}
/**
 * drm_sched_rq_select_entity - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		/* Continue the round robin after the last selected entity */
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}
/**
 * drm_sched_dependency_optimized - test if the dependency can be optimized
 *
 * @fence: the dependency fence
 * @entity: the entity which depends on the above fence
 *
 * Returns true if the dependency can be optimized and false otherwise.
 */
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);
/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->ring_mirror_list))
		schedule_delayed_work(&sched->work_tdr, sched->timeout);
}
/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	mod_delayed_work(system_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);
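
/*
 * Illustrative only: a driver that gets an interrupt on a hardware fault can
 * kick the timeout handler immediately instead of waiting for the TDR period
 * to expire. my_fault_irq() and struct my_ring are hypothetical.
 *
 *	static irqreturn_t my_fault_irq(int irq, void *arg)
 *	{
 *		struct my_ring *ring = arg;
 *
 *		drm_sched_fault(&ring->sched);
 *		return IRQ_HANDLED;
 *	}
 */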
/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
 * called from an IRQ context.
 *
 * Returns the remaining timeout in jiffies.
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT) &&
	    time_after(sched_timeout, now))
		return sched_timeout - now;

	return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);
/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler. Note that
 * this function can be called from an IRQ context.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining)
{
	unsigned long flags;

	spin_lock_irqsave(&sched->job_list_lock, flags);

	if (list_empty(&sched->ring_mirror_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(system_wq, &sched->work_tdr, remaining);

	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
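
/*
 * Illustrative only: drivers typically bracket an engine reset with the two
 * helpers above so that a long reset is not itself reported as a job timeout.
 * my_reset_engine() and struct my_ring are hypothetical.
 *
 *	unsigned long remaining = drm_sched_suspend_timeout(&ring->sched);
 *
 *	my_reset_engine(ring);
 *	drm_sched_resume_timeout(&ring->sched, remaining);
 */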
/* job_finish is called after the hw fence has signaled */
static void drm_sched_job_finish(struct work_struct *work)
{
	struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
						   finish_work);
	struct drm_gpu_scheduler *sched = s_job->sched;
	unsigned long flags;

	/*
	 * Canceling the timeout without removing our job from the ring mirror
	 * list is safe, as we will only end up in this worker if our job's
	 * finished fence has been signaled. So even if another worker
	 * manages to find this job as the next job in the list, the fence
	 * signaled check below will prevent the timeout from being restarted.
	 */
	cancel_delayed_work_sync(&sched->work_tdr);

	spin_lock_irqsave(&sched->job_list_lock, flags);
	/* queue TDR for next job */
	drm_sched_start_timeout(sched);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);

	sched->ops->free_job(s_job);
}
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;
	unsigned long flags;

	spin_lock_irqsave(&sched->job_list_lock, flags);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	drm_sched_start_timeout(sched);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	unsigned long flags;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
	job = list_first_entry_or_null(&sched->ring_mirror_list,
				       struct drm_sched_job, node);

	if (job)
		job->sched->ops->timedout_job(job);

	spin_lock_irqsave(&sched->job_list_lock, flags);
	drm_sched_start_timeout(sched);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* don't increase @bad's karma if it's from the KERNEL RQ,
	 * because sometimes a GPU hang can corrupt kernel jobs (like VM
	 * updating jobs); kernel jobs are always considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);
		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (atomic_read(&bad->karma) >
					    bad->sched->hang_limit)
						if (entity->guilty)
							atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma);
/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	unsigned long flags;
	struct dma_fence *last_fence = NULL;

	kthread_park(sched->thread);

	/*
	 * Verify all the signaled jobs in mirror list are removed from the ring
	 * by waiting for the latest job to enter the list. This should ensure
	 * that all the previous jobs that were in flight have also already
	 * signaled and been removed from the list.
	 */
	spin_lock_irqsave(&sched->job_list_lock, flags);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		} else {
			last_fence = dma_fence_get(&s_job->s_fence->finished);
			break;
		}
	}
	spin_unlock_irqrestore(&sched->job_list_lock, flags);

	if (last_fence) {
		dma_fence_wait(last_fence, false);
		dma_fence_put(last_fence);
	}
}
EXPORT_SYMBOL(drm_sched_stop);
/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete scheduler restart
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	if (!full_recovery)
		goto unpark;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being pushed in to HW and in drm_sched_stop we
	 * flushed all the jobs that were still in the mirror list but had
	 * already signaled and removed themselves from the list. Also
	 * concurrent GPU recoveries can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct dma_fence *fence = s_job->s_fence->parent;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &s_job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
		} else
			drm_sched_process_job(NULL, &s_job->cb);
	}

	drm_sched_start_timeout(sched);

unpark:
	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);
/**
 * drm_sched_resubmit_jobs - helper to relaunch jobs from the mirror ring list
 *
 * @sched: scheduler instance
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;

	/* TODO: do we need a spinlock here? */
	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		s_job->s_fence->parent = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
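
/*
 * Illustrative only: a driver's &drm_sched_backend_ops.timedout_job callback
 * typically combines the helpers above roughly as follows. my_hw_reset(),
 * ring and bad_job are hypothetical; error handling is omitted.
 *
 *	drm_sched_stop(&ring->sched);
 *	drm_sched_increase_karma(bad_job);
 *	my_hw_reset(ring);
 *	drm_sched_resubmit_jobs(&ring->sched);
 *	drm_sched_start(&ring->sched, true);
 */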
/**
 * drm_sched_job_init - init a scheduler job
 *
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	struct drm_gpu_scheduler *sched;

	drm_sched_entity_select_rq(entity);
	if (!entity->rq)
		return -ENOENT;

	sched = entity->rq->sched;

	job->sched = sched;
	job->entity = entity;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = drm_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_WORK(&job->finish_work, drm_sched_job_finish);
	INIT_LIST_HEAD(&job->node);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 *
 * @job: scheduler job to clean up
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	dma_fence_put(&job->s_fence->finished);
	job->s_fence = NULL;
}
EXPORT_SYMBOL(drm_sched_job_cleanup);
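
/*
 * Illustrative only: drivers usually call drm_sched_job_cleanup() from their
 * &drm_sched_backend_ops.free_job callback. my_free_job(), to_my_job() and
 * struct my_job are hypothetical.
 *
 *	static void my_free_job(struct drm_sched_job *sched_job)
 *	{
 *		struct my_job *job = to_my_job(sched_job);
 *
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(job);
 *	}
 */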
/**
 * drm_sched_ready - is the scheduler ready
 *
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}
/**
 * drm_sched_wakeup - Wake up the scheduler when it is ready
 *
 * @sched: scheduler instance
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}
/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}
/**
 * drm_sched_process_job - process a job
 *
 * @f: fence
 * @cb: fence callbacks
 *
 * Called after the job has finished execution.
 */
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;
	unsigned long flags;

	cancel_delayed_work(&sched->work_tdr);

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(&sched->num_jobs);

	spin_lock_irqsave(&sched->job_list_lock, flags);
	/* remove job from ring_mirror_list */
	list_del_init(&s_job->node);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);

	drm_sched_fence_finished(s_fence);

	trace_drm_sched_process_job(s_fence);
	wake_up_interruptible(&sched->wake_up_worker);

	schedule_work(&s_job->finish_work);
}
/**
 * drm_sched_blocked - check if the scheduler is blocked
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}
	return false;
}
/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		drm_sched_fence_scheduled(s_fence);

		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &sched_job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else
			drm_sched_process_job(NULL, &sched_job->cb);

		wake_up(&sched->job_scheduled);
	}

	return 0;
}
/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @name: name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission,
		   unsigned hang_limit,
		   long timeout,
		   const char *name)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->num_jobs, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);
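
/*
 * Illustrative only: the backend_ops table and the init call a driver would
 * typically provide. The my_* callbacks and the numeric limits are
 * hypothetical; the ops members shown are the ones this file invokes
 * (run_job, timedout_job, free_job) plus the dependency hook used by the
 * entity code.
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency	= my_job_dependency,
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_job_timedout,
 *		.free_job	= my_free_job,
 *	};
 *
 *	r = drm_sched_init(&ring->sched, &my_sched_ops, 16, 3,
 *			   msecs_to_jiffies(2000), ring->name);
 */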
/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);