/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)
/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the sched_list should have at least one element to schedule
 *       the entity
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
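
/*
 * Illustrative use (not part of this file): a driver typically embeds a
 * drm_sched_entity in its context object and initializes it against one or
 * more schedulers. The names below (my_ctx, my_sched) are hypothetical.
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_sched };
 *	int r;
 *
 *	r = drm_sched_entity_init(&my_ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (r)
 *		return r;
 */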
/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0)
		return true;

	return false;
}
/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Return true if entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}
/**
 * drm_sched_entity_get_free_sched - Get the rq from the sched_list with least load
 *
 * @entity: scheduler entity
 *
 * Return the pointer to the rq with least load.
 */
static struct drm_sched_rq *
drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = NULL;
	unsigned int min_score = UINT_MAX, num_score;
	unsigned int i;

	for (i = 0; i < entity->num_sched_list; ++i) {
		struct drm_gpu_scheduler *sched = entity->sched_list[i];

		if (!entity->sched_list[i]->ready) {
			DRM_WARN("sched%s is not ready, skipping", sched->name);
			continue;
		}

		num_score = atomic_read(&sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			rq = &entity->sched_list[i]->sched_rq[entity->priority];
		}
	}

	return rq;
}
/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * Splitting drm_sched_entity_fini() into two functions, the first one does the
 * waiting, removes the entity from the runqueue and returns an error when the
 * process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume existing
	 * queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any more IB enqueues right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
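
/*
 * Illustrative teardown ordering (hypothetical context object): flush first
 * so queued jobs are consumed or discarded, then finalize the entity.
 * drm_sched_entity_destroy() below combines both steps.
 *
 *	drm_sched_entity_flush(&my_ctx->entity, msecs_to_jiffies(1000));
 *	drm_sched_entity_fini(&my_ctx->entity);
 */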
/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}
/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipeline is hung by an older entity, the new entity
		 * might not even have had a chance to submit its first job to
		 * the HW, so entity->last_scheduled will remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}
/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/* Consumption of existing IBs wasn't completed. Forcefully
	 * remove them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for the thread to idle to make sure it isn't
			 * processing this entity.
			 */
			wait_for_completion(&entity->entity_idle);
		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);
/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
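
/*
 * Illustrative shortcut (hypothetical context object): drivers that do not
 * need a custom flush timeout can tear down an entity with a single call,
 * which flushes with MAX_WAIT_SCHED_ENTITY_Q_EMPTY and then finalizes:
 *
 *	drm_sched_entity_destroy(&my_ctx->entity);
 */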
/**
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}
/**
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}
/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity; we can ignore
		 * fences from ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled.
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}
/**
 * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity that is marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}
/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	rq = drm_sched_entity_get_free_sched(entity);
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}

	spin_unlock(&entity->rq_lock);
}
/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion to queue matches
 * the job's fence sequence number, this function should be
 * called with drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
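
/*
 * Illustrative submission flow (hypothetical names): per the note above,
 * drm_sched_job_init() and drm_sched_entity_push_job() are called under a
 * common lock so that queue insertion order matches the jobs' fence
 * sequence numbers.
 *
 *	mutex_lock(&my_ctx->submit_lock);
 *	r = drm_sched_job_init(&my_job->base, &my_ctx->entity, my_ctx);
 *	if (!r)
 *		drm_sched_entity_push_job(&my_job->base, &my_ctx->entity);
 *	mutex_unlock(&my_ctx->submit_lock);
 */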