/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
struct drm_gpu_scheduler;

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH_SW,
	DRM_SCHED_PRIORITY_HIGH_HW,
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_MAX,
	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};
/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @sched_list: A list of schedulers (drm_gpu_schedulers).
 *              Jobs from this entity can be scheduled on any scheduler
 *              on this list.
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
 * @priority: priority of the entity.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 *             new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 *                 to this entity. The &drm_sched_fence.scheduled uses the
 *                 fence_context but &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 *              of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to ctx's guilty.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: Marks the entity as removed from the rq and destined for
 *           termination.
 * @entity_idle: Signals when the entity is not in use.
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	struct list_head		list;
	struct drm_sched_rq		*rq;
	struct drm_gpu_scheduler	**sched_list;
	unsigned int			num_sched_list;
	enum drm_sched_priority		priority;
	spinlock_t			rq_lock;

	struct spsc_queue		job_queue;

	atomic_t			fence_seq;
	uint64_t			fence_context;

	struct dma_fence		*dependency;
	struct dma_fence_cb		cb;
	atomic_t			*guilty;
	struct dma_fence		*last_scheduled;
	struct task_struct		*last_user;
	bool				stopped;
	struct completion		entity_idle;
};
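
/*
 * Example: typical driver-side entity setup (an illustrative sketch, not
 * part of this interface; the foo_dev pointer and the single-scheduler
 * sched_list are assumptions):
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &foo_dev->sched };
 *	struct drm_sched_entity entity;
 *	int r;
 *
 *	// One entity per DRM file_priv, feeding jobs to one scheduler.
 *	r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  sched_list, ARRAY_SIZE(sched_list),
 *				  NULL);	// no guilty tracking
 *	if (r)
 *		return r;
 *	...
 *	drm_sched_entity_destroy(&entity);
 */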
/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
};
/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;

	/**
	 * @sched: the scheduler instance to which the job having this
	 * struct belongs.
	 */
	struct drm_gpu_scheduler	*sched;

	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;

	/**
	 * @owner: job owner for debugging.
	 */
	void				*owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
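
/*
 * Example: exposing the finished fence as the job's out fence (an
 * illustrative sketch; the foo_job wrapper and its base member are
 * assumptions):
 *
 *	struct foo_job {
 *		struct drm_sched_job base;
 *		...
 *	};
 *
 *	// Safe to hand out right after drm_sched_job_init(), long before
 *	// run_job() has produced the hardware fence.
 *	struct dma_fence *out_fence =
 *		dma_fence_get(&job->base.s_fence->finished);
 */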
/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @node: used to append this struct to the &drm_gpu_scheduler.ring_mirror_list.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(); the driver
 * then calls drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;
	struct dma_fence_cb		finish_cb;
	struct list_head		node;
	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;
};
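
/*
 * Example: the submission flow described above (an illustrative sketch;
 * foo_job, the fpriv file-private and the error handling are assumptions):
 *
 *	struct foo_job *job = kzalloc(sizeof(*job), GFP_KERNEL);
 *	int r;
 *
 *	if (!job)
 *		return -ENOMEM;
 *
 *	r = drm_sched_job_init(&job->base, &fpriv->entity, fpriv);
 *	if (r) {
 *		kfree(job);
 *		return r;
 *	}
 *
 *	// Fill in the driver-private payload, then hand the job over;
 *	// the scheduler owns it from this point on.
 *	drm_sched_entity_push_job(&job->base, &fpriv->entity);
 */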
/* Bumps the job's karma and reports whether it has exceeded @threshold. */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}
/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler;
 * these functions should be implemented by the driver.
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency: Called when the scheduler is considering scheduling
	 * this job next, to get another struct dma_fence for this job to
	 * block on. Once it returns NULL, run_job() may be called.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved. This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 */
	void (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
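
/*
 * Example: a minimal driver-side implementation of the ops above (an
 * illustrative sketch; the foo_* helpers are assumptions, and the optional
 * @dependency hook is left unset):
 *
 *	static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
 *	{
 *		struct foo_job *job = to_foo_job(sched_job);
 *
 *		// Kick the ring and return the hardware fence; the scheduler
 *		// signals s_fence->finished once that fence signals.
 *		return foo_ring_submit(job);
 *	}
 *
 *	static void foo_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		foo_gpu_recover(to_foo_job(sched_job));
 *	}
 *
 *	static void foo_free_job(struct drm_sched_job *sched_job)
 *	{
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(to_foo_job(sched_job));
 *	}
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.run_job	= foo_run_job,
 *		.timedout_job	= foo_timedout_job,
 *		.free_job	= foo_free_job,
 *	};
 */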
/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @ring_mirror_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the ring_mirror_list.
 * @hang_limit: once the hangs by a job cross this limit then it is marked
 *              guilty and will not be considered for scheduling further.
 * @num_jobs: the number of jobs currently queued in the scheduler.
 * @ready: marks if the underlying HW is ready to work.
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_MAX];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		ring_mirror_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t			num_jobs;
	bool				ready;
	bool				free_guilty;
};
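
/*
 * Example: bringing one scheduler up per ring at driver load and tearing it
 * down at unload (an illustrative sketch; foo_dev, foo_sched_ops, the queue
 * depth, hang limit and timeout values are assumptions):
 *
 *	int r = drm_sched_init(&foo_dev->sched, &foo_sched_ops,
 *			       32,			// hw_submission
 *			       3,			// hang_limit
 *			       msecs_to_jiffies(1000),	// timeout
 *			       "foo-ring0");
 *	if (r)
 *		return r;
 *	...
 *	drm_sched_fini(&foo_dev->sched);
 */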
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit, long timeout,
		   const char *name);

void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);

#endif