/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/slab.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"
#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)
/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting to the HW ring.
 *
 * @entity: scheduler entity to init
 * @rq_list: the list of run queues on which jobs from this
 *           entity can be submitted
 * @num_rq_list: number of run queues in rq_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the rq_list should have at least one element to schedule
 *       the entity
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  struct drm_sched_rq **rq_list,
			  unsigned int num_rq_list,
			  atomic_t *guilty)
{
	unsigned int i;

	if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_rq_list = num_rq_list;
	entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
				  GFP_KERNEL);
	if (!entity->rq_list)
		return -ENOMEM;

	for (i = 0; i < num_rq_list; ++i)
		entity->rq_list[i] = rq_list[i];

	if (num_rq_list)
		entity->rq = rq_list[0];

	entity->last_scheduled = NULL;

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
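/*
 * Illustrative usage sketch, not part of the original file: a driver would
 * typically pass the run queue(s) of the desired priority from its scheduler
 * instance(s).  The "adev" and "ctx" names below are hypothetical driver-side
 * structures.
 *
 *	struct drm_sched_rq *rq = &adev->gfx_sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 *	int r;
 *
 *	r = drm_sched_entity_init(&ctx->entity, &rq, 1, &ctx->guilty);
 *	if (r)
 *		return r;
 */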
/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_peek(&entity->job_queue) == NULL)
		return true;

	return false;
}
/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Return true if entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}
/**
 * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
 *
 * @entity: scheduler entity
 *
 * Return the pointer to the rq with least load.
 */
static struct drm_sched_rq *
drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = NULL;
	unsigned int min_jobs = UINT_MAX, num_jobs;
	unsigned int i;

	for (i = 0; i < entity->num_rq_list; ++i) {
		struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;

		if (!entity->rq_list[i]->sched->ready) {
			DRM_WARN("sched%s is not ready, skipping", sched->name);
			continue;
		}

		num_jobs = atomic_read(&sched->num_jobs);
		if (num_jobs < min_jobs) {
			min_jobs = num_jobs;
			rq = entity->rq_list[i];
		}
	}

	return rq;
}
/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * Splitting drm_sched_entity_fini() into two functions, the first one does the
 * waiting, removes the entity from the runqueue and returns an error when the
 * process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs or discard them on SIGKILL
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For killed process disable any more IBs enqueue right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}
/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, the new entity
		 * might not even have had a chance to submit its first job
		 * to HW, so entity->last_scheduled will remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}
/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/* Consumption of existing IBs wasn't completed. Forcefully
	 * remove them here.
	 */
	if (spsc_queue_peek(&entity->job_queue)) {
		if (sched) {
			/* Park the scheduler thread for a moment to make sure
			 * it isn't processing our entity.
			 */
			kthread_park(sched->thread);
			kthread_unpark(sched->thread);
		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
	kfree(entity->rq_list);
}
EXPORT_SYMBOL(drm_sched_entity_fini);
/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
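/*
 * Illustrative teardown sketch (hypothetical "ctx" pointer): callers that do
 * not need a custom timeout can use drm_sched_entity_destroy(), which flushes
 * with MAX_WAIT_SCHED_ENTITY_Q_EMPTY and then calls drm_sched_entity_fini():
 *
 *	drm_sched_entity_destroy(&ctx->entity);
 *
 * or, with an explicit timeout in jiffies:
 *
 *	drm_sched_entity_flush(&ctx->entity, msecs_to_jiffies(1000));
 *	drm_sched_entity_fini(&ctx->entity);
 */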
/**
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}
/**
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}
/**
 * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority
 */
static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
					     enum drm_sched_priority priority)
{
	*rq = &(*rq)->sched->sched_rq[priority];
}
/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	unsigned int i;

	spin_lock(&entity->rq_lock);

	for (i = 0; i < entity->num_rq_list; ++i)
		drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);

	if (entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		drm_sched_entity_set_rq_priority(&entity->rq, priority);
		drm_sched_rq_add_entity(entity->rq, entity);
	}

	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
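/*
 * Illustrative sketch (hypothetical "ctx" pointer): changing the priority
 * simply repoints the entity at the run queue of that priority on each
 * scheduler in its rq_list, e.g. to raise it to high priority (assuming
 * DRM_SCHED_PRIORITY_HIGH_HW is defined in enum drm_sched_priority):
 *
 *	drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_HIGH_HW);
 */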
/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourself
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}
/**
 * drm_sched_entity_pop_job - get a ready-to-be-scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from entity that marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}
/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	rq = drm_sched_entity_get_free_sched(entity);
	if (rq == entity->rq)
		return;

	spin_lock(&entity->rq_lock);
	drm_sched_rq_remove_entity(entity->rq, entity);
	entity->rq = rq;
	spin_unlock(&entity->rq_lock);
}
/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion to queue matches
 * the job's fence sequence number, this function should be
 * called with drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->num_jobs);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
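/*
 * Illustrative submission sketch (hypothetical "ctx", "job" and lock names):
 * to keep the insertion order consistent with the fence sequence numbers,
 * drm_sched_job_init() and drm_sched_entity_push_job() are called under one
 * lock shared by all submitters of this entity:
 *
 *	mutex_lock(&ctx->submit_lock);
 *	r = drm_sched_job_init(&job->base, &ctx->entity, owner);
 *	if (!r)
 *		drm_sched_entity_push_job(&job->base, &ctx->entity);
 *	mutex_unlock(&ctx->submit_lock);
 */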