/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		if (!list_empty(workload_q_head(vgpu, i)))
			return true;
	}

	return false;
}

struct vgpu_sched_data {
	struct list_head lru_list;
	struct intel_vgpu *vgpu;

	ktime_t sched_in_time;
	ktime_t sched_out_time;
	ktime_t sched_time;
	ktime_t left_ts;
	ktime_t allocated_ts;

	struct vgpu_sched_ctl sched_ctl;
};

struct gvt_sched_data {
	struct intel_gvt *gvt;
	struct hrtimer timer;
	unsigned long period;

	struct list_head lru_runq_head;
};

static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
{
	ktime_t delta_ts;
	struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;

	delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;

	vgpu_data->sched_time += delta_ts;
	vgpu_data->left_ts -= delta_ts;
}

#define GVT_TS_BALANCE_PERIOD_MS 100
#define GVT_TS_BALANCE_STAGE_NUM 10

static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct list_head *pos;
	static uint64_t stage_check;
	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;

	/* The timeslice accumulation reset at stage 0, which is
	 * allocated again without adding previous debt.
	 */
	if (stage == 0) {
		int total_weight = 0;
		ktime_t fair_timeslice;

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			total_weight += vgpu_data->sched_ctl.weight;
		}

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) *
						vgpu_data->sched_ctl.weight /
						total_weight;

			vgpu_data->allocated_ts = fair_timeslice;
			vgpu_data->left_ts = vgpu_data->allocated_ts;
		}
	} else {
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);

			/* timeslice for next 100ms should add the left/debt
			 * slice of previous stages.
			 */
			vgpu_data->left_ts += vgpu_data->allocated_ts;
		}
	}
}

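/*
 * For example (hypothetical weights, for illustration only): with two vGPUs
 * of weight 2 and 6 on the run queue, total_weight is 8, so a stage-0 balance
 * hands out 100ms * 2 / 8 = 25ms and 100ms * 6 / 8 = 75ms as allocated_ts.
 * Stages 1..9 then add the same allocation on top of whatever left_ts (or
 * debt) remains from the previous 100ms window.
 */
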
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	struct vgpu_sched_data *vgpu_data;
	ktime_t cur_time;

	/* no target to schedule */
	if (!scheduler->next_vgpu)
		return;

	/*
	 * after the flag is set, workload dispatch thread will
	 * stop dispatching workload for current vgpu
	 */
	scheduler->need_reschedule = true;

	/* still have uncompleted workload? */
	for_each_engine(engine, gvt->dev_priv, i) {
		if (scheduler->current_workload[i])
			return;
	}

	cur_time = ktime_get();
	if (scheduler->current_vgpu) {
		vgpu_data = scheduler->current_vgpu->sched_data;
		vgpu_data->sched_out_time = cur_time;
		vgpu_update_timeslice(scheduler->current_vgpu);
	}
	vgpu_data = scheduler->next_vgpu->sched_data;
	vgpu_data->sched_in_time = cur_time;

	/* switch current vgpu */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* wake up workload dispatch thread */
	for_each_engine(engine, gvt->dev_priv, i)
		wake_up(&scheduler->waitq[i]);
}

static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;
	struct list_head *head = &sched_data->lru_runq_head;
	struct list_head *pos;

	/* search a vgpu with pending workload */
	list_for_each(pos, head) {

		vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		/* Return the vGPU only if it has time slice left */
		if (vgpu_data->left_ts > 0) {
			vgpu = vgpu_data->vgpu;
			break;
		}
	}

	return vgpu;
}

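/* Scheduling period in nanoseconds (1ms): it is handed straight to
 * hrtimer_add_expires_ns() when the scheduler timer below re-arms itself.
 */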
#define GVT_DEFAULT_TIME_SLICE 1000000

static void tbs_sched_func(struct gvt_sched_data *sched_data)
{
	struct intel_gvt *gvt = sched_data->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;
	static uint64_t timer_check;

	if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
		gvt_balance_timeslice(sched_data);

	/* no active vgpu or has already had a target */
	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
		goto out;

	vgpu = find_busy_vgpu(sched_data);
	if (vgpu) {
		scheduler->next_vgpu = vgpu;

		/* Move the last used vGPU to the tail of lru_list */
		vgpu_data = vgpu->sched_data;
		list_del_init(&vgpu_data->lru_list);
		list_add_tail(&vgpu_data->lru_list,
			      &sched_data->lru_runq_head);
	} else {
		scheduler->next_vgpu = gvt->idle_vgpu;
	}
out:
	if (scheduler->next_vgpu)
		try_to_schedule_next_vgpu(gvt);
}

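/*
 * Selection policy in brief: find_busy_vgpu() walks the LRU run queue, so of
 * the vGPUs that both have pending workloads and still have timeslice left,
 * the least recently scheduled one wins; moving the winner to the tail keeps
 * the rotation fair. If nothing runnable is found, the idle vGPU is scheduled
 * instead.
 */
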
void intel_gvt_schedule(struct intel_gvt *gvt)
{
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;

	mutex_lock(&gvt->lock);
	tbs_sched_func(sched_data);
	mutex_unlock(&gvt->lock);
}

static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
{
	struct gvt_sched_data *data;

	data = container_of(timer_data, struct gvt_sched_data, timer);

	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);

	hrtimer_add_expires_ns(&data->timer, data->period);

	return HRTIMER_RESTART;
}

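/*
 * Timer-driven flow: the hrtimer fires every data->period ns (1ms by
 * default), queues an INTEL_GVT_REQUEST_SCHED request and re-arms itself.
 * The request is expected to be serviced outside of timer context by calling
 * intel_gvt_schedule(), which runs tbs_sched_func() under gvt->lock; every
 * GVT_TS_BALANCE_PERIOD_MS invocations that in turn triggers a timeslice
 * rebalance.
 */
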
static int tbs_sched_init(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->lru_runq_head);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	data->timer.function = tbs_timer_fn;
	data->period = GVT_DEFAULT_TIME_SLICE;
	data->gvt = gvt;

	scheduler->sched_data = data;

	return 0;
}

static void tbs_sched_clean(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data = scheduler->sched_data;

	hrtimer_cancel(&data->timer);

	kfree(data);
	scheduler->sched_data = NULL;
}

static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->sched_ctl.weight = vgpu->sched_ctl.weight;
	data->vgpu = vgpu;
	INIT_LIST_HEAD(&data->lru_list);

	vgpu->sched_data = data;

	return 0;
}

static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
	kfree(vgpu->sched_data);
	vgpu->sched_data = NULL;
}

static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	if (!list_empty(&vgpu_data->lru_list))
		return;

	list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);

	if (!hrtimer_active(&sched_data->timer))
		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
			sched_data->period), HRTIMER_MODE_ABS);
}

static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	list_del_init(&vgpu_data->lru_list);
}

static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};

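/*
 * The timeslice-based scheduler above is the only policy defined in this
 * file; intel_gvt_init_sched_policy() wires it in unconditionally. An
 * alternative policy would presumably be added by providing another
 * intel_gvt_sched_policy_ops instance and selecting it there.
 */
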
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops = &tbs_schedule_ops;

	return gvt->scheduler.sched_ops->init(gvt);
}

void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops->clean(gvt);
}

int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
}

void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
}

void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
	gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);

	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}

void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}
}