/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 */
#include "hypercall.h"
#include "interrupt.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "mmio_context.h"
#include "cmd_parser.h"
#include "fb_decoder.h"
#define GVT_MAX_VGPU 8
enum intel_gvt_hypervisor_type {
	INTEL_GVT_HYPERVISOR_XEN = 0,
	INTEL_GVT_HYPERVISOR_KVM,
};
struct intel_gvt_host {
	struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;
/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	unsigned long msi_cap_offset;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
};
/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};
#define INTEL_GVT_MAX_NUM_FENCES 32
/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};
struct intel_vgpu_mmio {
	void *vreg;
	void *sreg;
	bool disable_warn_untrack;
};
#define INTEL_GVT_MAX_BAR_NUM 4
struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};
struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};
#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
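/*
 * Usage sketch (illustrative, not part of this header): peek at the
 * virtual vendor ID in a vGPU's emulated config space. PCI_VENDOR_ID
 * comes from <uapi/linux/pci_regs.h>.
 *
 *	u16 vendor = *(u16 *)(vgpu_cfg_space(vgpu) + PCI_VENDOR_ID);
 */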
#define INTEL_GVT_MAX_PIPE 4
struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
		       INTEL_GVT_EVENT_MAX);
};
struct intel_vgpu_opregion {
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
};
#define vgpu_opregion(vgpu) (&(vgpu->opregion))
#define INTEL_GVT_MAX_PORT 5
struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
	struct intel_vgpu_sbi sbi;
};
struct vgpu_sched_ctl {
	int weight;
};
enum {
	INTEL_VGPU_EXECLIST_SUBMISSION = 1,
	INTEL_VGPU_GUC_SUBMISSION,
};
struct intel_vgpu_submission_ops {
	int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
	void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
};
struct intel_vgpu_submission {
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	struct i915_gem_context *shadow_ctx;
	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	void *ring_scan_buffer[I915_NUM_ENGINES];
	int ring_scan_buffer_size[I915_NUM_ENGINES];
	const struct intel_vgpu_submission_ops *ops;
	int virtual_submission_interface;
};
struct intel_vgpu {
	struct intel_gvt *gvt;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	unsigned int resetting_eng;
	struct vgpu_sched_ctl sched_ctl;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_submission submission;
	u32 hws_pga[I915_NUM_ENGINES];

	struct dentry *debugfs;

#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
	struct {
		struct mdev_device *mdev;
		struct vfio_region *region;
		struct eventfd_ctx *intx_trigger;
		struct eventfd_ctx *msi_trigger;
		struct rb_root cache;
		struct mutex cache_lock;
		struct notifier_block iommu_notifier;
		struct notifier_block group_notifier;
		struct work_struct release_work;
		struct vfio_device *vfio_device;
	} vdev;
#endif

	struct list_head dmabuf_obj_list_head;
	struct mutex dmabuf_lock;
	struct idr object_idr;

	struct completion vblank_done;
};
/* validating GM healthy status */
#define vgpu_is_vm_unhealthy(ret_val) \
	(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
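/*
 * Usage sketch (illustrative): a typical caller checks the return value
 * of a shadowing path and drops the guest into failsafe mode when the
 * error indicates an unhealthy VM:
 *
 *	ret = intel_gvt_scan_and_shadow_workload(workload);
 *	if (vgpu_is_vm_unhealthy(ret))
 *		enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
 */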
struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};
struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};
/* Special MMIO blocks. */
struct gvt_mmio_block {
	unsigned int device;
	i915_reg_t offset;
	unsigned int size;
	gvt_mmio_func read;
	gvt_mmio_func write;
};
#define INTEL_GVT_MMIO_HASH_BITS 11
struct intel_gvt_mmio {
	u8 *mmio_attribute;
/* Register contains RO bits */
#define F_RO		(1 << 0)
/* Register contains graphics address */
#define F_GMADR		(1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK	(1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS	(1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED	(1 << 4)
/* This reg has been accessed through GPU commands */
#define F_CMD_ACCESSED	(1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN	(1 << 6)

	struct gvt_mmio_block *mmio_block;
	unsigned int num_mmio_block;

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
	unsigned long num_tracked_mmio;
};
struct intel_gvt_firmware {
	bool firmware_loaded;
};
#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
	unsigned int avail_instance;
	unsigned int low_gm_size;
	unsigned int high_gm_size;
	enum intel_vgpu_edid resolution;
};
struct intel_gvt {
	struct drm_i915_private *dev_priv;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_workload_scheduler scheduler;
	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
	struct intel_vgpu_type *types;
	unsigned int num_types;
	struct intel_vgpu *idle_vgpu;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;
	unsigned long service_request;

	struct engine_mmio *engine_mmio_list;

	struct dentry *debugfs_root;
};
static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
	return i915->gvt;
}
enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,

	/* Scheduling trigger by timer */
	INTEL_GVT_REQUEST_SCHED = 1,

	/* Scheduling trigger by event */
	INTEL_GVT_REQUEST_EVENT_SCHED = 2,
};
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}
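/*
 * Usage sketch (illustrative): ask the service thread to run the vGPU
 * scheduler, e.g. from a timer callback:
 *
 *	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_SCHED);
 */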
void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);
/* Aperture/GM space definitions for GVT device */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt)		(gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt)	(gvt->dev_priv->ggtt.gmadr.start)

#define gvt_ggtt_gm_sz(gvt)	(gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt)	(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)

#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
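/*
 * Illustrative layout note: a vGPU owns two slices of the global GM, a
 * mappable (aperture) slice and a high (hidden) slice, so its total
 * guest GM size is:
 *
 *	u64 total = vgpu_aperture_sz(vgpu) + vgpu_hidden_sz(vgpu);
 *
 * which is exactly what vgpu_ggtt_gm_sz() above expands to.
 */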
struct intel_vgpu_creation_params {
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
};
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
			    u32 fence, u64 value);
/* Macros for easily accessing vGPU virtual/shadow registers.
   Explicitly separate use for typed MMIO reg or real offset. */
#define vgpu_vreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_vreg64_t(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
	(*(u64 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_sreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
#define vgpu_sreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.sreg + (offset)))
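/*
 * Usage sketch (illustrative): read a virtual register with the typed
 * form and poke a raw offset with the untyped form. PIPECONF() is the
 * usual i915 typed-register macro; 0x70008 is an assumed raw offset.
 *
 *	u32 v = vgpu_vreg_t(vgpu, PIPECONF(PIPE_A));
 *	vgpu_vreg(vgpu, 0x70008) = v;
 */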
#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)
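/*
 * Usage sketch (illustrative): visit every active vGPU, e.g. when the
 * vblank emulation request is serviced (emulate_vblank_on() is a
 * hypothetical per-vGPU helper):
 *
 *	struct intel_vgpu *vgpu;
 *	int id;
 *
 *	for_each_active_vgpu(gvt, vgpu, id)
 *		emulate_vblank_on(vgpu);
 */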
static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
	u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32 bits aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
		/*
		 * only update bit 31 - bit 4,
		 * leave the bit 3 - bit 0 unchanged.
		 */
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	}
}
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	  (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	 (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	 (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))
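/*
 * Usage sketch (illustrative): reject a guest graphics memory address
 * that is in neither the vGPU's aperture nor its hidden range:
 *
 *	if (!vgpu_gmadr_is_valid(vgpu, gmadr))
 *		return -EINVAL;
 */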
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
			       bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
				void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
				 void *p_data, unsigned int bytes);
static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
	/* BARs are 64-bit */
	return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
}
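/*
 * Usage sketch (illustrative): turn a guest physical address that hits
 * BAR0 into an offset within that BAR:
 *
 *	u64 base = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
 *	u64 offset = gpa - base;
 */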
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
				unsigned int);
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
				unsigned int);
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
				struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *);
	void (*vgpu_reset)(struct intel_vgpu *);
	void (*vgpu_activate)(struct intel_vgpu *);
	void (*vgpu_deactivate)(struct intel_vgpu *);
	struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
			const char *name);
	bool (*get_gvt_attrs)(struct attribute ***type_attrs,
			struct attribute_group ***intel_vgpu_type_groups);
	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
				unsigned int);
};
enum {
	GVT_FAILSAFE_UNSUPPORTED_GUEST,
	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
	GVT_FAILSAFE_GUEST_ERR,
};
static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}
static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}
/**
 * intel_gvt_mmio_set_accessed - mark an MMIO register as accessed
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_accessed(
		struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}
/**
 * intel_gvt_mmio_is_cmd_access - check if an MMIO register can be accessed by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_cmd_access(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}
/**
 * intel_gvt_mmio_is_unalign - check if an MMIO register can be accessed with an unaligned address
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_unalign(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}
/**
 * intel_gvt_mmio_set_cmd_accessed - mark an MMIO register as accessed by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_cmd_accessed(
		struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
}
/**
 * intel_gvt_mmio_has_mode_mask - check if an MMIO register has a mode mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO register has a mode mask in its higher 16 bits, false otherwise.
 *
 */
static inline bool intel_gvt_mmio_has_mode_mask(
		struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
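/*
 * Usage sketch (illustrative): for a mode-mask register, the high 16
 * bits of the written value select which of the low 16 bits actually
 * take effect, so a write handler merges rather than overwrites:
 *
 *	if (intel_gvt_mmio_has_mode_mask(gvt, offset)) {
 *		u32 mask = data >> 16;
 *
 *		vgpu_vreg(vgpu, offset) =
 *			(vgpu_vreg(vgpu, offset) & ~mask) | (data & mask);
 *	}
 */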
int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
int intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);