/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>

struct drm_gpu_scheduler;
struct drm_sched_rq;

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH_SW,
	DRM_SCHED_PRIORITY_HIGH_HW,
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_MAX,
	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * A scheduler entity is a wrapper around a job queue or a group
 * of other entities. Entities take turns emitting jobs from their
 * job queues to the corresponding hardware ring, based on the
 * scheduling policy.
 */
struct drm_sched_entity {
	struct list_head		list;
	struct drm_sched_rq		*rq;
	struct drm_gpu_scheduler	*sched;

	spinlock_t			queue_lock;
	struct spsc_queue		job_queue;

	uint64_t			fence_context;

	struct dma_fence		*dependency;
	struct dma_fence_cb		cb;
	atomic_t			*guilty; /* points to ctx's guilty */
};
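
/*
 * Example (illustrative sketch, not part of this header): a driver usually
 * embeds one entity per context (or per priority level) in its own context
 * state and funnels all of that context's submissions through it.  The
 * struct and field names below are hypothetical.
 *
 *	struct my_ctx {
 *		struct drm_sched_entity entity;	// wraps this context's job queue
 *		atomic_t guilty;		// flag the entity's *guilty points at
 *	};
 */
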
/**
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
};

struct drm_sched_fence {
	struct dma_fence		scheduled;
	struct dma_fence		finished;
	struct dma_fence_cb		cb;
	struct dma_fence		*parent;
	struct drm_gpu_scheduler	*sched;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

struct drm_sched_job {
	struct spsc_node		queue_node;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;
	struct dma_fence_cb		finish_cb;
	struct work_struct		finish_work;
	struct list_head		node;
	struct delayed_work		work_tdr;
	atomic_t			karma; /* incremented by drm_sched_invalidate_job() */
	enum drm_sched_priority		s_priority;
};

static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}
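
/*
 * Example (sketch, hypothetical driver code): drm_sched_invalidate_job()
 * atomically bumps the job's karma and reports whether it crossed the given
 * threshold, so a timeout handler can decide to blame the owning context
 * instead of endlessly resubmitting the job.  my_driver_timedout_job(),
 * my_driver_mark_guilty(), my_driver_reset_and_recover() and MY_HANG_LIMIT
 * are assumptions, not part of this API.
 *
 *	static void my_driver_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		if (drm_sched_invalidate_job(sched_job, MY_HANG_LIMIT))
 *			my_driver_mark_guilty(sched_job);
 *		my_driver_reset_and_recover(sched_job->sched);
 *	}
 */
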
/**
 * Define the backend operations called by the scheduler;
 * these functions should be implemented on the driver side.
 */
struct drm_sched_backend_ops {
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
	void (*timedout_job)(struct drm_sched_job *sched_job);
	void (*free_job)(struct drm_sched_job *sched_job);
};
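
/*
 * Example (sketch, assuming hypothetical my_driver_* callbacks): a driver
 * hooks its ring up to the scheduler through this ops table.  dependency()
 * returns the next fence the job still has to wait on (or NULL when it is
 * ready to run), run_job() hands the job to the hardware ring and returns
 * its hardware fence, timedout_job() is called when that fence does not
 * signal in time, and free_job() releases the driver's job structure.
 *
 *	static const struct drm_sched_backend_ops my_driver_sched_ops = {
 *		.dependency	= my_driver_job_dependency,
 *		.run_job	= my_driver_run_job,
 *		.timedout_job	= my_driver_timedout_job,
 *		.free_job	= my_driver_free_job,
 *	};
 */
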
/**
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_MAX];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct task_struct		*thread;
	struct list_head		ring_mirror_list;
	spinlock_t			job_list_lock;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit, long timeout,
		   const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
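
/*
 * Example (sketch): one scheduler instance is typically created per hardware
 * ring at device init time and torn down again on unload.  "ring", the
 * submission limit, hang limit and timeout values are hypothetical driver
 * choices.
 *
 *	r = drm_sched_init(&ring->sched, &my_driver_sched_ops,
 *			   16, 3, msecs_to_jiffies(2000), ring->name);
 *	if (r)
 *		DRM_ERROR("Failed to create scheduler on ring %s.\n", ring->name);
 *	...
 *	drm_sched_fini(&ring->sched);
 */
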
int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
			  struct drm_sched_entity *entity,
			  struct drm_sched_rq *rq,
			  uint32_t jobs, atomic_t *guilty);
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
			   struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
			     struct drm_sched_rq *rq);
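
/*
 * Example (sketch): each context initializes its entity against the run
 * queue of the desired priority on the target ring's scheduler, and tears
 * it down when the context is destroyed.  "ring", "ctx" and the queue depth
 * of 32 are hypothetical.
 *
 *	struct drm_sched_rq *rq =
 *		&ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 *
 *	r = drm_sched_entity_init(&ring->sched, &ctx->entity, rq,
 *				  32, &ctx->guilty);
 *	...
 *	drm_sched_entity_fini(&ring->sched, &ctx->entity);
 */
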
struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_gpu_scheduler *sched,
		       struct drm_sched_entity *entity,
		       void *owner);
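
/*
 * Example (sketch): job submission is a two-step dance.  The driver first
 * initializes its job (which embeds a struct drm_sched_job) against the
 * entity, then, once the command stream is fully built, pushes it to the
 * entity's queue; the scheduler calls run_job() when the job's dependencies
 * have signalled.  "my_job", "ctx" and "owner" are hypothetical.
 *
 *	r = drm_sched_job_init(&my_job->base, &ring->sched,
 *			       &ctx->entity, owner);
 *	if (r)
 *		return r;
 *	...
 *	drm_sched_entity_push_job(&my_job->base, &ctx->entity);
 */
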
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
			    struct drm_sched_job *job);
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

#endif