drivers/gpu/drm/scheduler/sched_entity.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler
 * when submitting jobs to a HW ring
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the sched_list must have at least one element for the entity
 *       to be scheduled.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
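
/*
 * Illustrative usage (not part of this file): a driver would typically
 * embed an entity in its per-context state and init it once before any
 * job submission. The "ring" and "ctx" names below are hypothetical.
 *
 *	struct drm_gpu_scheduler *scheds[] = { &ring->sched };
 *	int r;
 *
 *	r = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  scheds, ARRAY_SIZE(scheds), NULL);
 *	if (r)
 *		return r;
 */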

/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		 existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
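
/*
 * Illustrative usage (hypothetical driver code): e.g. when user space
 * changes a context's hardware priority, a driver might rebind the
 * entity to a different set of schedulers. The caller is expected to
 * make sure the entity is not in use by the scheduler at that point.
 *
 *	struct drm_gpu_scheduler **scheds = adev->hw_prio_scheds[hw_prio];
 *	unsigned int num = adev->num_hw_prio_scheds[hw_prio];
 *
 *	drm_sched_entity_modify_sched(&ctx->entity, scheds, num);
 */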

/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Return true if entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait for the queue to become empty, in jiffies
 *
 * drm_sched_entity_fini() is split into two functions; this first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any further IB enqueue right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
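
/*
 * Illustrative usage (hypothetical): drivers typically call this from
 * their file-descriptor close path, bounding how long a dying process
 * may keep the scheduler busy. MAX_WAIT_SCHED_ENTITY_Q_EMPTY is the
 * default timeout used by drm_sched_entity_destroy() below.
 *
 *	long remaining = drm_sched_entity_flush(&ctx->entity,
 *				MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
 */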

/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, the new entity
		 * might not even have had a chance to submit its first job
		 * to the HW, so entity->last_scheduled will remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/* Consumption of existing IBs wasn't completed. Forcefully
	 * remove them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for the scheduler thread to idle to make sure
			 * it isn't processing this entity.
			 */
			wait_for_completion(&entity->entity_idle);
		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
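
/*
 * Illustrative teardown (hypothetical driver code): this wrapper is what
 * most drivers want when a context goes away; the flush/fini split above
 * exists for drivers that need to separate the waiting from the final
 * cleanup.
 *
 *	drm_sched_entity_destroy(&ctx->entity);	// flush + fini
 */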

/**
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/**
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
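
/*
 * Illustrative usage (hypothetical): a driver reacting to an ioctl that
 * changes a context's priority could simply update each of its entities;
 * the new priority takes effect when the entity next selects a runqueue.
 *
 *	for (i = 0; i < ctx->num_entities; ++i)
 *		drm_sched_entity_set_priority(ctx->entities[i], priority);
 */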

/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity; we can ignore
		 * fences from ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler; only need to wait for
		 * it to be scheduled.
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}
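
/*
 * Illustrative sketch (hypothetical) of the other side of this contract:
 * a driver's ops->dependency callback hands back the fences a job must
 * wait on, one at a time, and the function above decides whether a full
 * hardware wait is needed or waiting for "scheduled" suffices.
 *
 *	static struct dma_fence *
 *	my_sched_dependency(struct drm_sched_job *job,
 *			    struct drm_sched_entity *entity)
 *	{
 *		// return the next unsignaled fence, or NULL when done
 *		return my_job_next_dependency(to_my_job(job));
 *	}
 */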

/**
 * drm_sched_entity_pop_job - get a ready-to-be-scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}
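
/*
 * Illustrative consumer (simplified, hypothetical): the scheduler main
 * loop wakes up, picks a ready entity from its runqueues and pops one
 * job from it, roughly like:
 *
 *	entity = drm_sched_select_entity(sched);
 *	if (entity) {
 *		sched_job = drm_sched_entity_pop_job(entity);
 *		complete(&entity->entity_idle);
 *		// if sched_job != NULL, hand it to ops->run_job()
 *	}
 */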

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}

	spin_unlock(&entity->rq_lock);
}
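
/*
 * Illustrative setup (hypothetical): load balancing only happens for
 * entities initialized with more than one scheduler, e.g. one per
 * instance of the same engine; drm_sched_pick_best() then picks the
 * least loaded one whenever the queue runs empty.
 *
 *	struct drm_gpu_scheduler *scheds[] = {
 *		&adev->inst[0].ring.sched,	// hypothetical names
 *		&adev->inst[1].ring.sched,
 *	};
 *
 *	drm_sched_entity_init(&ctx->entity, prio, scheds,
 *			      ARRAY_SIZE(scheds), NULL);
 */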

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion to the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
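
/*
 * Illustrative submission flow (hypothetical driver code): job init and
 * push happen under a common lock so queue order matches fence sequence
 * numbers, as noted above.
 *
 *	mutex_lock(&ctx->submit_lock);
 *	r = drm_sched_job_init(&job->base, &ctx->entity, owner);
 *	if (!r)
 *		drm_sched_entity_push_job(&job->base, &ctx->entity);
 *	mutex_unlock(&ctx->submit_lock);
 */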