/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->gt, i) {
		if (!list_empty(workload_q_head(vgpu, engine)))
			return true;
	}

	return false;
}
/* We give 2 seconds higher prio for vGPU during start */
#define GVT_SCHED_VGPU_PRI_TIME  2

struct vgpu_sched_data {
	struct list_head lru_list;
	struct intel_vgpu *vgpu;
	bool active;
	bool pri_sched;
	ktime_t pri_time;
	ktime_t sched_in_time;
	ktime_t sched_time;
	ktime_t left_ts;
	ktime_t allocated_ts;

	struct vgpu_sched_ctl sched_ctl;
};

struct gvt_sched_data {
	struct intel_gvt *gvt;
	struct hrtimer timer;
	unsigned long period;
	struct list_head lru_runq_head;
	ktime_t expire_time;
};
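/*
 * Rough picture of the per-vGPU accounting below (derived from how the
 * fields are used, not a separate spec): allocated_ts is the fair share a
 * vGPU gets per GVT_TS_BALANCE_PERIOD_MS window, left_ts is the remaining
 * budget in the current window (it may go negative and carry over as debt),
 * and sched_time accumulates the total time the vGPU has actually been
 * scheduled, measured from the last sched_in_time stamp.
 */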
static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
{
	ktime_t delta_ts;
	struct vgpu_sched_data *vgpu_data;

	if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
		return;

	vgpu_data = vgpu->sched_data;
	delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time);
	vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts);
	vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts);
	vgpu_data->sched_in_time = cur_time;
}
#define GVT_TS_BALANCE_PERIOD_MS 100
#define GVT_TS_BALANCE_STAGE_NUM 10

static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct list_head *pos;
	static u64 stage_check;
	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;

	/* The timeslice accumulation is reset at stage 0, where it is
	 * allocated again without adding previous debt.
	 */
	if (stage == 0) {
		int total_weight = 0;
		ktime_t fair_timeslice;

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			total_weight += vgpu_data->sched_ctl.weight;
		}

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS),
						     total_weight) * vgpu_data->sched_ctl.weight;

			vgpu_data->allocated_ts = fair_timeslice;
			vgpu_data->left_ts = vgpu_data->allocated_ts;
		}
	} else {
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);

			/* The timeslice for the next 100ms should add the
			 * left/debt slice of previous stages.
			 */
			vgpu_data->left_ts += vgpu_data->allocated_ts;
		}
	}
}
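/*
 * Worked example for the balancing above (illustrative weights, not taken
 * from the code): two runnable vGPUs with weights 4 and 2 give
 * total_weight = 6, so each 100ms window is split into roughly 66.7ms and
 * 33.3ms of allocated_ts. Only stage 0 resets left_ts to that share; stages
 * 1..9 merely add allocated_ts on top of whatever is left, so an overrun
 * shows up as negative left_ts and is paid back over the rest of the
 * 10-stage cycle.
 */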
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	struct vgpu_sched_data *vgpu_data;
	ktime_t cur_time;

	/* no need to schedule if next_vgpu is the same as current_vgpu;
	 * let the scheduler choose next_vgpu again by setting it to NULL.
	 */
	if (scheduler->next_vgpu == scheduler->current_vgpu) {
		scheduler->next_vgpu = NULL;
		return;
	}

	/*
	 * after the flag is set, the workload dispatch thread will
	 * stop dispatching workloads for the current vgpu
	 */
	scheduler->need_reschedule = true;

	/* still have uncompleted workload? */
	for_each_engine(engine, gvt->gt, i) {
		if (scheduler->current_workload[engine->id])
			return;
	}

	cur_time = ktime_get();
	vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
	vgpu_data = scheduler->next_vgpu->sched_data;
	vgpu_data->sched_in_time = cur_time;

	/* switch current vgpu */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* wake up workload dispatch thread */
	for_each_engine(engine, gvt->gt, i)
		wake_up(&scheduler->waitq[engine->id]);
}
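/*
 * Note on the hand-off above: need_reschedule is raised first so the
 * workload dispatch thread stops submitting for the outgoing vGPU; the
 * actual current_vgpu switch only happens once no engine has a
 * current_workload in flight, and the waitq wake-up then lets the dispatch
 * thread start pulling work for the newly scheduled vGPU.
 */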
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;
	struct list_head *head = &sched_data->lru_runq_head;
	struct list_head *pos;

	/* search a vgpu with pending workload */
	list_for_each(pos, head) {
		vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		if (vgpu_data->pri_sched) {
			if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
				vgpu = vgpu_data->vgpu;
				break;
			}
			vgpu_data->pri_sched = false;
		}

		/* Return the vGPU only if it has time slice left */
		if (vgpu_data->left_ts > 0) {
			vgpu = vgpu_data->vgpu;
			break;
		}
	}

	return vgpu;
}
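/*
 * Selection order used above: vGPUs are scanned in LRU order; one still
 * inside its start-up priority window (pri_sched) is picked immediately,
 * otherwise a vGPU is only chosen while it has timeslice budget left
 * (left_ts > 0).
 */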
/* in nanoseconds */
#define GVT_DEFAULT_TIME_SLICE 1000000

static void tbs_sched_func(struct gvt_sched_data *sched_data)
{
	struct intel_gvt *gvt = sched_data->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;

	/* no active vgpu or already has a target */
	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
		goto out;

	vgpu = find_busy_vgpu(sched_data);
	if (vgpu) {
		scheduler->next_vgpu = vgpu;
		vgpu_data = vgpu->sched_data;
		if (!vgpu_data->pri_sched) {
			/* Move the last used vGPU to the tail of lru_list */
			list_del_init(&vgpu_data->lru_list);
			list_add_tail(&vgpu_data->lru_list,
				      &sched_data->lru_runq_head);
		}
	} else {
		scheduler->next_vgpu = gvt->idle_vgpu;
	}
out:
	if (scheduler->next_vgpu)
		try_to_schedule_next_vgpu(gvt);
}
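/*
 * If no vGPU currently has both pending work and budget, the idle vGPU is
 * scheduled instead, and the periodic timer re-evaluates the run queue on
 * the next tick.
 */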
void intel_gvt_schedule(struct intel_gvt *gvt)
{
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
	ktime_t cur_time;

	mutex_lock(&gvt->sched_lock);
	cur_time = ktime_get();

	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
			       (void *)&gvt->service_request)) {
		if (cur_time >= sched_data->expire_time) {
			gvt_balance_timeslice(sched_data);
			sched_data->expire_time = ktime_add_ms(
				cur_time, GVT_TS_BALANCE_PERIOD_MS);
		}
	}
	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);

	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
	tbs_sched_func(sched_data);

	mutex_unlock(&gvt->sched_lock);
}
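/*
 * Two service bits feed this function: INTEL_GVT_REQUEST_SCHED is raised by
 * the periodic timer (tbs_timer_fn) and additionally gates the 100ms
 * timeslice rebalance, while INTEL_GVT_REQUEST_EVENT_SCHED is raised by
 * intel_gvt_kick_schedule() and is simply cleared here before the
 * scheduling pass runs.
 */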
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
{
	struct gvt_sched_data *data;

	data = container_of(timer_data, struct gvt_sched_data, timer);

	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);

	hrtimer_add_expires_ns(&data->timer, data->period);

	return HRTIMER_RESTART;
}
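/*
 * The timer re-arms itself: the expiry is pushed forward by data->period
 * (GVT_DEFAULT_TIME_SLICE, i.e. 1ms by default) and HRTIMER_RESTART is
 * returned, so a scheduling request is raised on every tick for as long as
 * the timer stays active.
 */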
static int tbs_sched_init(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->lru_runq_head);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	data->timer.function = tbs_timer_fn;
	data->period = GVT_DEFAULT_TIME_SLICE;
	data->gvt = gvt;

	scheduler->sched_data = data;

	return 0;
}
static void tbs_sched_clean(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data = scheduler->sched_data;

	hrtimer_cancel(&data->timer);

	kfree(data);
	scheduler->sched_data = NULL;
}
static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->sched_ctl.weight = vgpu->sched_ctl.weight;
	data->vgpu = vgpu;
	INIT_LIST_HEAD(&data->lru_list);

	vgpu->sched_data = data;

	return 0;
}
static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;

	kfree(vgpu->sched_data);
	vgpu->sched_data = NULL;

	/* this vgpu id has been removed */
	if (idr_is_empty(&gvt->vgpu_idr))
		hrtimer_cancel(&sched_data->timer);
}
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
	ktime_t now;

	if (!list_empty(&vgpu_data->lru_list))
		return;

	now = ktime_get();
	vgpu_data->pri_time = ktime_add(now,
					ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
	vgpu_data->pri_sched = true;

	list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);

	if (!hrtimer_active(&sched_data->timer))
		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
			      sched_data->period), HRTIMER_MODE_ABS);
	vgpu_data->active = true;
}
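/*
 * A freshly started vGPU gets a priority window of GVT_SCHED_VGPU_PRI_TIME
 * (2 seconds) via pri_sched/pri_time, during which find_busy_vgpu() picks it
 * ahead of the normal budget check; the first vGPU to start also kicks off
 * the scheduling timer.
 */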
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	list_del_init(&vgpu_data->lru_list);
	vgpu_data->active = false;
}
static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};
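/*
 * tbs_schedule_ops is the only policy wired up here; the wrappers below go
 * through gvt->scheduler.sched_ops exclusively, so an alternative policy
 * would presumably only need to supply another ops table in
 * intel_gvt_init_sched_policy().
 */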
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
	int ret;

	mutex_lock(&gvt->sched_lock);
	gvt->scheduler.sched_ops = &tbs_schedule_ops;
	ret = gvt->scheduler.sched_ops->init(gvt);
	mutex_unlock(&gvt->sched_lock);

	return ret;
}
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	mutex_lock(&gvt->sched_lock);
	gvt->scheduler.sched_ops->clean(gvt);
	mutex_unlock(&gvt->sched_lock);
}
/* For the per-vgpu scheduler policy, there are 2 pieces of per-vgpu data:
 * sched_data and sched_ctl. We treat them as part of the global scheduler,
 * protected by gvt->sched_lock. Callers should decide for themselves
 * whether vgpu_lock needs to be held outside.
 */
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	int ret;

	mutex_lock(&vgpu->gvt->sched_lock);
	ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
	mutex_unlock(&vgpu->gvt->sched_lock);

	return ret;
}
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->sched_lock);
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
	mutex_unlock(&vgpu->gvt->sched_lock);
}
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	mutex_lock(&vgpu->gvt->sched_lock);
	if (!vgpu_data->active) {
		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
	}
	mutex_unlock(&vgpu->gvt->sched_lock);
}
void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
	mutex_lock(&gvt->sched_lock);
	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
	mutex_unlock(&gvt->sched_lock);
}
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (!vgpu_data->active)
		return;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	mutex_lock(&vgpu->gvt->sched_lock);
	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}

	intel_runtime_pm_get(&dev_priv->runtime_pm);
	spin_lock_bh(&scheduler->mmio_context_lock);
	for_each_engine(engine, vgpu->gvt->gt, id) {
		if (scheduler->engine_owner[engine->id] == vgpu) {
			intel_gvt_switch_mmio(vgpu, NULL, engine);
			scheduler->engine_owner[engine->id] = NULL;
		}
	}
	spin_unlock_bh(&scheduler->mmio_context_lock);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
	mutex_unlock(&vgpu->gvt->sched_lock);
}
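/*
 * The runtime PM reference around the loop above is assumed to be needed
 * because intel_gvt_switch_mmio() touches hardware MMIO while handing
 * engines still owned by this vGPU back to the host.
 */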