/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _GPU_SCHEDULER_H_
#define _GPU_SCHEDULER_H_

#include <linux/kfifo.h>
#include <linux/dma-fence.h>

struct amd_gpu_scheduler;
struct amd_sched_rq;

/**
 * A scheduler entity is a wrapper around a job queue or a group
 * of other entities. Entities take turns emitting jobs from their
 * job queues to the corresponding hardware ring, based on the
 * scheduling policy.
 */
struct amd_sched_entity {
	struct list_head		list;
	struct amd_sched_rq		*rq;
	struct amd_gpu_scheduler	*sched;

	spinlock_t			queue_lock;
	struct kfifo			job_queue;

	atomic_t			fence_seq;
	uint64_t			fence_context;

	struct dma_fence		*dependency;
	struct dma_fence_cb		cb;
};

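/*
 * Illustrative sketch (not part of this API): a driver would typically
 * embed an entity in a per-context structure and bind it to one of the
 * scheduler's run queues at context creation. All names below are
 * hypothetical:
 *
 *	struct my_ctx {
 *		struct amd_sched_entity	entity;
 *	};
 *
 *	r = amd_sched_entity_init(&ring->sched, &ctx->entity,
 *				  &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL],
 *				  num_jobs);
 */
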
/**
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct amd_sched_rq {
	spinlock_t		lock;
	struct list_head	entities;
	struct amd_sched_entity	*current_entity;
};

struct amd_sched_fence {
	struct dma_fence		scheduled;
	struct dma_fence		finished;
	struct dma_fence_cb		cb;
	struct dma_fence		*parent;
	struct amd_gpu_scheduler	*sched;
	spinlock_t			lock;
	void				*owner;
};

struct amd_sched_job {
	struct amd_gpu_scheduler	*sched;
	struct amd_sched_entity		*s_entity;
	struct amd_sched_fence		*s_fence;
	struct dma_fence_cb		finish_cb;
	struct work_struct		finish_work;
	struct list_head		node;
	struct delayed_work		work_tdr;
};

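/*
 * Illustrative sketch: drivers usually embed amd_sched_job at the start
 * of their own job structure and recover it with container_of() in the
 * backend callbacks. "my_job" is a hypothetical driver type:
 *
 *	struct my_job {
 *		struct amd_sched_job	base;
 *	};
 *
 *	#define to_my_job(sched_job) \
 *		container_of((sched_job), struct my_job, base)
 */
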
extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
extern const struct dma_fence_ops amd_sched_fence_ops_finished;
static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
{
	if (f->ops == &amd_sched_fence_ops_scheduled)
		return container_of(f, struct amd_sched_fence, scheduled);

	if (f->ops == &amd_sched_fence_ops_finished)
		return container_of(f, struct amd_sched_fence, finished);

	return NULL;
}

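/*
 * Illustrative sketch: to_amd_sched_fence() recovers the scheduler
 * fence from either of its two embedded dma_fences, e.g. when a
 * generic struct dma_fence *f arrives in a callback (hypothetical):
 *
 *	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 *
 *	if (s_fence)
 *		sched = s_fence->sched;
 */
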
/**
 * Define the backend operations called by the scheduler;
 * these functions should be implemented on the driver side.
 */
struct amd_sched_backend_ops {
	struct dma_fence *(*dependency)(struct amd_sched_job *sched_job);
	struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
	void (*timedout_job)(struct amd_sched_job *sched_job);
	void (*free_job)(struct amd_sched_job *sched_job);
};

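/*
 * Illustrative sketch of a minimal backend, assuming hypothetical
 * helpers my_hw_submit() and my_handle_timeout(); run_job() is expected
 * to return the hardware fence that signals when the job completes:
 *
 *	static struct dma_fence *my_run_job(struct amd_sched_job *sched_job)
 *	{
 *		return my_hw_submit(to_my_job(sched_job));
 *	}
 *
 *	static void my_timedout_job(struct amd_sched_job *sched_job)
 *	{
 *		my_handle_timeout(to_my_job(sched_job));
 *	}
 *
 *	static const struct amd_sched_backend_ops my_ops = {
 *		.dependency	= my_dependency,
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 */
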
enum amd_sched_priority {
	AMD_SCHED_PRIORITY_KERNEL = 0,
	AMD_SCHED_PRIORITY_NORMAL,
	AMD_SCHED_MAX_PRIORITY
};

/**
 * One scheduler is implemented for each hardware ring.
 */
struct amd_gpu_scheduler {
	const struct amd_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct amd_sched_rq		sched_rq[AMD_SCHED_MAX_PRIORITY];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	struct task_struct		*thread;
	struct list_head		ring_mirror_list;
	spinlock_t			job_list_lock;
};

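/*
 * Illustrative sketch: one run queue exists per priority level, so a
 * driver selects an entity's run queue by indexing sched_rq with an
 * amd_sched_priority value (hypothetical code):
 *
 *	struct amd_sched_rq *rq =
 *		&sched->sched_rq[AMD_SCHED_PRIORITY_NORMAL];
 */
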
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   uint32_t hw_submission, long timeout, const char *name);
void amd_sched_fini(struct amd_gpu_scheduler *sched);

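/*
 * Illustrative sketch: one scheduler is initialized per ring;
 * hw_submission bounds the number of jobs in flight on the hardware
 * and timeout (in jiffies) arms the job timeout handler. Names are
 * hypothetical:
 *
 *	r = amd_sched_init(&ring->sched, &my_ops, num_hw_submission,
 *			   msecs_to_jiffies(timeout_ms), ring->name);
 *	if (r)
 *		return r;
 */
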
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs);
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity);
void amd_sched_entity_push_job(struct amd_sched_job *sched_job);

int amd_sched_fence_slab_init(void);
void amd_sched_fence_slab_fini(void);

struct amd_sched_fence *amd_sched_fence_create(
	struct amd_sched_entity *s_entity, void *owner);
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
void amd_sched_fence_finished(struct amd_sched_fence *fence);
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner);
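/*
 * Illustrative sketch of the overall submission flow, assuming the
 * entity was initialized earlier and with error handling elided:
 *
 *	r = amd_sched_job_init(&job->base, &ring->sched, &ctx->entity,
 *			       owner);
 *	if (r)
 *		return r;
 *
 *	fence = dma_fence_get(&job->base.s_fence->finished);
 *	amd_sched_entity_push_job(&job->base);
 */
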
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);

#endif