/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		if (!list_empty(workload_q_head(vgpu, i)))
			return true;
	}

	return false;
}
struct vgpu_sched_data {
	struct list_head lru_list;
	struct intel_vgpu *vgpu;
	bool active;

	ktime_t sched_in_time;
	ktime_t sched_out_time;
	ktime_t sched_time;
	ktime_t left_ts;
	ktime_t allocated_ts;

	struct vgpu_sched_ctl sched_ctl;
};
struct gvt_sched_data {
	struct intel_gvt *gvt;
	struct hrtimer timer;
	unsigned long period;

	struct list_head lru_runq_head;
};
static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
{
	ktime_t delta_ts;
	struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;

	delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;

	vgpu_data->sched_time += delta_ts;
	vgpu_data->left_ts -= delta_ts;
}
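/*
 * Note: left_ts can go negative here when a vGPU overruns its allocation.
 * That deficit is carried forward by gvt_balance_timeslice() below in
 * stages 1..GVT_TS_BALANCE_STAGE_NUM-1 (left_ts += allocated_ts) and is
 * only dropped at stage 0, where left_ts is reset to a fresh allocation.
 */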
#define GVT_TS_BALANCE_PERIOD_MS 100
#define GVT_TS_BALANCE_STAGE_NUM 10
static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct list_head *pos;
	static uint64_t stage_check;
	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;

	/* The timeslice accumulation is reset at stage 0; it is
	 * reallocated without adding the debt from previous stages.
	 */
	if (stage == 0) {
		int total_weight = 0;
		ktime_t fair_timeslice;

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			total_weight += vgpu_data->sched_ctl.weight;
		}

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) *
						vgpu_data->sched_ctl.weight /
						total_weight;

			vgpu_data->allocated_ts = fair_timeslice;
			vgpu_data->left_ts = vgpu_data->allocated_ts;
		}
	} else {
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);

			/* The timeslice for the next 100ms should add the
			 * left/debt slice from previous stages.
			 */
			vgpu_data->left_ts += vgpu_data->allocated_ts;
		}
	}
}
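/*
 * For illustration (weights chosen arbitrarily): with two vGPUs of weight
 * 4 and 12 on the run queue, total_weight is 16, so per 100ms balance
 * period the first vGPU is allocated 100ms * 4/16 = 25ms and the second
 * 100ms * 12/16 = 75ms of scheduling time.
 */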
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	struct vgpu_sched_data *vgpu_data;
	ktime_t cur_time;

	/* No need to schedule if next_vgpu is the same as current_vgpu;
	 * let the scheduler choose next_vgpu again by setting it to NULL.
	 */
	if (scheduler->next_vgpu == scheduler->current_vgpu) {
		scheduler->next_vgpu = NULL;
		return;
	}

	/*
	 * After the flag is set, the workload dispatch thread will
	 * stop dispatching workloads for the current vgpu.
	 */
	scheduler->need_reschedule = true;

	/* still have uncompleted workload? */
	for_each_engine(engine, gvt->dev_priv, i) {
		if (scheduler->current_workload[i])
			return;
	}

	cur_time = ktime_get();
	if (scheduler->current_vgpu) {
		vgpu_data = scheduler->current_vgpu->sched_data;
		vgpu_data->sched_out_time = cur_time;
		vgpu_update_timeslice(scheduler->current_vgpu);
	}
	vgpu_data = scheduler->next_vgpu->sched_data;
	vgpu_data->sched_in_time = cur_time;

	/* switch current vgpu */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* wake up workload dispatch thread */
	for_each_engine(engine, gvt->dev_priv, i)
		wake_up(&scheduler->waitq[i]);
}
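/*
 * The switch above is two-phased: need_reschedule is set first so the
 * workload dispatch thread stops feeding the current vGPU, and the actual
 * switch (timeslice accounting, current_vgpu update, waitq wake-up) only
 * happens once no engine still has a current_workload[] entry in flight.
 */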
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;
	struct list_head *head = &sched_data->lru_runq_head;
	struct list_head *pos;

	/* search for a vGPU with pending workload */
	list_for_each(pos, head) {
		vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		/* Return the vGPU only if it has time slice left */
		if (vgpu_data->left_ts > 0) {
			vgpu = vgpu_data->vgpu;
			break;
		}
	}

	return vgpu;
}
#define GVT_DEFAULT_TIME_SLICE 1000000
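/*
 * data->period below is consumed by ktime_add_ns()/hrtimer_add_expires_ns(),
 * so the default time slice is 1,000,000 ns = 1 ms per scheduler tick;
 * timeslices are then rebalanced roughly every GVT_TS_BALANCE_PERIOD_MS
 * (100) ticks in intel_gvt_schedule().
 */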
static void tbs_sched_func(struct gvt_sched_data *sched_data)
{
	struct intel_gvt *gvt = sched_data->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;

	/* no active vgpu, or a target has already been chosen */
	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
		goto out;

	vgpu = find_busy_vgpu(sched_data);
	if (vgpu) {
		scheduler->next_vgpu = vgpu;

		/* Move the last used vGPU to the tail of lru_list */
		vgpu_data = vgpu->sched_data;
		list_del_init(&vgpu_data->lru_list);
		list_add_tail(&vgpu_data->lru_list,
			      &sched_data->lru_runq_head);
	} else {
		scheduler->next_vgpu = gvt->idle_vgpu;
	}
out:
	if (scheduler->next_vgpu)
		try_to_schedule_next_vgpu(gvt);
}
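/*
 * Selection policy: find_busy_vgpu() scans lru_runq_head from the head, so
 * the least recently scheduled vGPU with pending work and remaining
 * timeslice wins; moving the winner to the tail keeps the scan round-robin.
 * When no vGPU qualifies, the idle vGPU is scheduled instead.
 */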
void intel_gvt_schedule(struct intel_gvt *gvt)
{
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
	static uint64_t timer_check;

	mutex_lock(&gvt->lock);

	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request)) {
		if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
			gvt_balance_timeslice(sched_data);
	}
	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);

	tbs_sched_func(sched_data);

	mutex_unlock(&gvt->lock);
}
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
{
	struct gvt_sched_data *data;

	data = container_of(timer_data, struct gvt_sched_data, timer);

	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);

	hrtimer_add_expires_ns(&data->timer, data->period);

	return HRTIMER_RESTART;
}
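/*
 * The timer callback only marks the scheduling request and re-arms itself;
 * the actual scheduling work is expected to run later from
 * intel_gvt_schedule() in the GVT service thread, not in timer context.
 */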
static int tbs_sched_init(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->lru_runq_head);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	data->timer.function = tbs_timer_fn;
	data->period = GVT_DEFAULT_TIME_SLICE;
	data->gvt = gvt;

	scheduler->sched_data = data;

	return 0;
}
static void tbs_sched_clean(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data = scheduler->sched_data;

	hrtimer_cancel(&data->timer);

	kfree(data);
	scheduler->sched_data = NULL;
}
static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->sched_ctl.weight = vgpu->sched_ctl.weight;
	data->vgpu = vgpu;
	INIT_LIST_HEAD(&data->lru_list);

	vgpu->sched_data = data;

	return 0;
}
static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;

	kfree(vgpu->sched_data);
	vgpu->sched_data = NULL;

	/* this vgpu id has been removed */
	if (idr_is_empty(&gvt->vgpu_idr))
		hrtimer_cancel(&sched_data->timer);
}
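/*
 * Once vgpu_idr is empty there is no vGPU left to schedule, so the
 * scheduling timer can be stopped; tbs_sched_start_schedule() re-arms it
 * when a vGPU starts scheduling again.
 */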
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	if (!list_empty(&vgpu_data->lru_list))
		return;

	list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);

	if (!hrtimer_active(&sched_data->timer))
		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
			sched_data->period), HRTIMER_MODE_ABS);
	vgpu_data->active = true;
}
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	list_del_init(&vgpu_data->lru_list);
	vgpu_data->active = false;
}
static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops = &tbs_schedule_ops;

	return gvt->scheduler.sched_ops->init(gvt);
}
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops->clean(gvt);
}
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
}
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
}
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	if (!vgpu_data->active) {
		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
	}
}
void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
}
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;
	int ring_id;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	if (!vgpu_data->active)
		return;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}

	spin_lock_bh(&scheduler->mmio_context_lock);
	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
		if (scheduler->engine_owner[ring_id] == vgpu) {
			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
	}
	spin_unlock_bh(&scheduler->mmio_context_lock);
}
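/*
 * Clearing engine_owner[] under mmio_context_lock ensures a stopped vGPU no
 * longer owns any engine's MMIO context; intel_gvt_switch_mmio() with a
 * NULL next vGPU switches that context away from it.
 */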