/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	/* setup the ballooning information */
	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;

	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		     vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size 0x%llx\n",
		     vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

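	/* struct vgt_if is the layout of the PVINFO page shared with the
	 * guest driver; it must exactly fill the page, hence the size
	 * check below.
	 */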
	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}

#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num)	\
	(VGPU_MAX_WEIGHT / (vgpu_num))
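/*
 * For example, VGPU_WEIGHT(8) == 16 / 8 == 2 and VGPU_WEIGHT(4) == 16 / 4
 * == 4: a type that packs four instances per GPU is weighted twice as
 * heavily as one that packs eight.
 */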
static struct {
	unsigned int low_mm;
	unsigned int high_mm;
	unsigned int fence;

	/* A vGPU with a weight of 8 will get twice as much GPU as a vGPU
	 * with a weight of 4 on a contended host; different vGPU types are
	 * assigned different weights. Legal weights range from 1 to 16.
	 */
	int weight;
	enum intel_vgpu_edid edid;
	char *name;
} vgpu_types[] = {
/* Fixed vGPU type table */
	{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
};
/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt : GVT device
 *
 * Initialize the vGPU type list based on the available resources.
 *
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int num_types;
	unsigned int i, low_avail, high_avail;
	unsigned int min_low;
	/* The vGPU type name is defined as GVTg_Vx_y, which contains the
	 * physical GPU generation type (e.g. V4 for a BDW server, V5 for
	 * a SKL server).
	 *
	 * Depending on the physical SKU resources, we might see vGPU types
	 * like GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
	 * different types of vGPU on the same physical GPU depending on
	 * the available resources. Each vGPU type has an "avail_instance"
	 * field to indicate how many vGPU instances can be created for
	 * this type.
	 */
	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
	gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
	if (!gvt->types)
		return -ENOMEM;

	min_low = MB_TO_BYTES(32);
	for (i = 0; i < num_types; ++i) {
		if (low_avail / vgpu_types[i].low_mm == 0)
			break;

		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
		gvt->types[i].fence = vgpu_types[i].fence;
		if (vgpu_types[i].weight < 1 ||
		    vgpu_types[i].weight > VGPU_MAX_WEIGHT)
			return -EINVAL;

		gvt->types[i].weight = vgpu_types[i].weight;
		gvt->types[i].resolution = vgpu_types[i].edid;
		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
						   high_avail / vgpu_types[i].high_mm);
		if (IS_GEN8(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V4_%s",
				vgpu_types[i].name);
		else if (IS_GEN9(gvt->dev_priv))
			sprintf(gvt->types[i].name, "GVTg_V5_%s",
				vgpu_types[i].name);
		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance,
			     gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence,
			     gvt->types[i].weight,
			     vgpu_edid_str(gvt->types[i].resolution));
	}

	gvt->num_types = i;
	return 0;
}
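/*
 * Worked example with hypothetical numbers: if low_avail is 448MB and
 * high_avail is 3584MB, the 128MB/512MB type gets
 * min(448 / 128, 3584 / 512) = min(3, 7) = 3 available instances.
 */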
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
	kfree(gvt->types);
}
static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
	int i;
	unsigned int low_gm_avail, high_gm_avail, fence_avail;
	unsigned int low_gm_min, high_gm_min, fence_min;
	/* Need to depend on the maximum hw resource size, but keep the
	 * static config for now.
	 */
	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;
	for (i = 0; i < gvt->num_types; i++) {
		low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
		high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
		fence_min = fence_avail / gvt->types[i].fence;
		gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
						   fence_min);
		gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
			     i, gvt->types[i].name,
			     gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
			     gvt->types[i].high_gm_size, gvt->types[i].fence);
	}
}
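/*
 * avail_instance shrinks as vGPUs are created and grows back as they are
 * destroyed: both intel_gvt_create_vgpu() and intel_gvt_destroy_vgpu()
 * call this update after changing the allocated GM/fence totals.
 */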
/**
 * intel_gvt_activate_vgpu - activate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to activate a virtual GPU.
 *
 */
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->lock);
	vgpu->active = true;
	mutex_unlock(&vgpu->gvt->lock);
}
/**
 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to deactivate a virtual GPU.
 * All virtual GPU runtime information will be destroyed.
 *
 */
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&gvt->lock);

	vgpu->active = false;

	if (atomic_read(&vgpu->submission.running_workload_num)) {
		mutex_unlock(&gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&gvt->lock);
	}

	intel_vgpu_stop_schedule(vgpu);
	intel_vgpu_dmabuf_cleanup(vgpu);

	mutex_unlock(&gvt->lock);
}
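/*
 * The unlock/wait/lock dance above drops gvt->lock while waiting for
 * in-flight workloads to retire, since the workload scheduler needs that
 * lock to make progress.
 */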
/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to destroy a virtual GPU.
 *
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&gvt->lock);

	WARN(vgpu->active, "vGPU is still active!\n");

	intel_gvt_debugfs_remove_vgpu(vgpu);
	idr_remove(&gvt->vgpu_idr, vgpu->id);
	if (idr_is_empty(&gvt->vgpu_idr))
		intel_gvt_clean_irq(gvt);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_submission(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	intel_vgpu_dmabuf_cleanup(vgpu);
	vfree(vgpu);

	intel_gvt_update_vgpu_types(gvt);
	mutex_unlock(&gvt->lock);
}
#define IDLE_VGPU_IDR 0
/**
 * intel_gvt_create_idle_vgpu - create an idle virtual GPU
 * @gvt: GVT device
 *
 * This function is called when user wants to create an idle virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	enum intel_engine_id i;
	int ret;

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	vgpu->id = IDLE_VGPU_IDR;
	vgpu->gvt = gvt;

	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_free_vgpu;

	vgpu->active = false;

	return vgpu;

out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}
/**
 * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to destroy an idle virtual GPU.
 *
 */
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
	intel_vgpu_clean_sched_policy(vgpu);
	vfree(vgpu);
}
static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
		     param->handle, param->low_gm_sz, param->high_gm_sz,
		     param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&gvt->lock);

	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
			GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	vgpu->sched_ctl.weight = param->weight;
	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
	idr_init(&vgpu->object_idr);
	intel_vgpu_init_cfg_space(vgpu, param->primary);

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	ret = intel_vgpu_init_opregion(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_display(vgpu, param->resolution);
	if (ret)
		goto out_clean_opregion;

	ret = intel_vgpu_setup_submission(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_submission;

	ret = intel_gvt_debugfs_add_vgpu(vgpu);
	if (ret)
		goto out_clean_sched_policy;

	ret = intel_gvt_hypervisor_set_opregion(vgpu);
	if (ret)
		goto out_clean_sched_policy;

	mutex_unlock(&gvt->lock);

	return vgpu;

out_clean_sched_policy:
	intel_vgpu_clean_sched_policy(vgpu);
out_clean_submission:
	intel_vgpu_clean_submission(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_opregion:
	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	mutex_unlock(&gvt->lock);
	return ERR_PTR(ret);
}
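/*
 * The error labels above form the usual kernel unwind ladder: each label
 * undoes only the steps that had already succeeded, in reverse order, so
 * a failure at any init step leaves no partially constructed vGPU behind.
 */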
/**
 * intel_gvt_create_vgpu - create a virtual GPU
 * @gvt: GVT device
 * @type: type of the vGPU to create
 *
 * This function is called when user wants to create a virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
				struct intel_vgpu_type *type)
{
	struct intel_vgpu_creation_params param;
	struct intel_vgpu *vgpu;

	param.handle = 0;
	param.primary = 1;
	param.low_gm_sz = type->low_gm_size;
	param.high_gm_sz = type->high_gm_size;
	param.fence_sz = type->fence;
	param.weight = type->weight;
	param.resolution = type->resolution;

	/* XXX current param based on MB */
	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
	param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);

	vgpu = __intel_gvt_create_vgpu(gvt, &param);
	if (IS_ERR(vgpu))
		return vgpu;

	/* recalculate the number of instances left for each type */
	intel_gvt_update_vgpu_types(gvt);

	return vgpu;
}
/**
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: vGPU Device Model Level Reset or GT Reset
 * @engine_mask: engines to reset for GT reset
 *
 * This function is called when user wants to reset a virtual GPU through
 * device model reset or GT reset. The caller should hold the gvt lock.
 *
 * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
 * the whole vGPU to default state as when it is created. This vGPU function
 * is required both for functionality and security concerns. The ultimate goal
 * of vGPU FLR is to reuse a vGPU instance across virtual machines. When we
 * assign a vGPU to a virtual machine, we must issue such a reset first.
 *
 * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines
 * (Render, Blitter, Video, Video Enhancement) defined by the GPU Spec.
 * Unlike the FLR, a GT reset only resets particular resources of a vGPU per
 * the reset request. The guest driver can issue a GT reset by programming
 * the virtual GDRST register to reset specific virtual GPU engines or all
 * engines.
 *
 * The parameter @dmlr identifies whether we will do a DMLR or a GT reset.
 * The parameter @engine_mask specifies the engines that need to be
 * reset. If the value ALL_ENGINES is given for @engine_mask, the caller
 * requests a full GT reset in which we reset all virtual GPU engines.
 * For FLR, @engine_mask is ignored.
 */
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);

	vgpu->resetting_eng = resetting_eng;

	intel_vgpu_stop_schedule(vgpu);
	/*
	 * The current_vgpu will be set to NULL after stopping the
	 * scheduler when the reset is triggered by the current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&gvt->lock);
	}
	intel_vgpu_reset_submission(vgpu, resetting_eng);
	/* full GPU reset or device model level reset */
	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
		/* fence will not be reset during virtual reset */
		if (dmlr) {
			intel_vgpu_reset_gtt(vgpu);
			intel_vgpu_reset_resource(vgpu);
		}

		intel_vgpu_reset_mmio(vgpu, dmlr);
		populate_pvinfo_page(vgpu);
		intel_vgpu_reset_display(vgpu);

		if (dmlr) {
			intel_vgpu_reset_cfg_space(vgpu);
			/* only reset the failsafe mode when dmlr reset */
			vgpu->failsafe = false;
			vgpu->pv_notified = false;
		}
	}

	vgpu->resetting_eng = 0;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}
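/*
 * Summary: a per-engine GT reset only resets submission state for the
 * requested engines; a full GT reset additionally resets MMIO, the PVINFO
 * page and the display; a DMLR on top of that resets GTT, resources,
 * config space and the failsafe/pv flags, returning the vGPU to its
 * freshly created state.
 */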
/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
 * @vgpu: virtual GPU
 *
 * This function is called when user wants to reset a virtual GPU.
 *
 */
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->gvt->lock);
}