/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Jike Song <jike.song@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 */
37 * DOC: Hypervisor Service APIs for GVT-g Core Logic
39 * This is the glue layer between specific hypervisor MPT modules and GVT-g core
40 * logic. Each kind of hypervisor MPT module provides a collection of function
41 * callbacks and will be attached to GVT host when the driver is loading.
42 * GVT-g core logic will call these APIs to request specific services from
47 * intel_gvt_hypervisor_host_init - init GVT-g host side
50 * Zero on success, negative error code if failed
52 static inline int intel_gvt_hypervisor_host_init(struct device
*dev
,
53 void *gvt
, const void *ops
)
55 if (!intel_gvt_host
.mpt
->host_init
)
58 return intel_gvt_host
.mpt
->host_init(dev
, gvt
, ops
);
62 * intel_gvt_hypervisor_host_exit - exit GVT-g host side
64 static inline void intel_gvt_hypervisor_host_exit(struct device
*dev
)
66 /* optional to provide */
67 if (!intel_gvt_host
.mpt
->host_exit
)
70 intel_gvt_host
.mpt
->host_exit(dev
);
74 * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
75 * related stuffs inside hypervisor.
78 * Zero on success, negative error code if failed.
80 static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu
*vgpu
)
82 /* optional to provide */
83 if (!intel_gvt_host
.mpt
->attach_vgpu
)
86 return intel_gvt_host
.mpt
->attach_vgpu(vgpu
, &vgpu
->handle
);
90 * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU
91 * related stuffs inside hypervisor.
94 * Zero on success, negative error code if failed.
96 static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu
*vgpu
)
98 /* optional to provide */
99 if (!intel_gvt_host
.mpt
->detach_vgpu
)
102 intel_gvt_host
.mpt
->detach_vgpu(vgpu
);
/*
 * Register offsets within the MSI capability in PCI config space,
 * relative to the capability base @offset. Arguments are parenthesized
 * so expression arguments (e.g. "base + 8") expand correctly.
 */
#define MSI_CAP_CONTROL(offset)	((offset) + 2)	/* Message Control */
#define MSI_CAP_ADDRESS(offset)	((offset) + 4)	/* Message Address */
#define MSI_CAP_DATA(offset)	((offset) + 8)	/* Message Data */
#define MSI_CAP_EN 0x1				/* MSI Enable bit in Message Control */
111 * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU
114 * Zero on success, negative error code if failed.
116 static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu
*vgpu
)
118 unsigned long offset
= vgpu
->gvt
->device_info
.msi_cap_offset
;
123 control
= *(u16
*)(vgpu_cfg_space(vgpu
) + MSI_CAP_CONTROL(offset
));
124 addr
= *(u32
*)(vgpu_cfg_space(vgpu
) + MSI_CAP_ADDRESS(offset
));
125 data
= *(u16
*)(vgpu_cfg_space(vgpu
) + MSI_CAP_DATA(offset
));
127 /* Do not generate MSI if MSIEN is disable */
128 if (!(control
& MSI_CAP_EN
))
131 if (WARN(control
& GENMASK(15, 1), "only support one MSI format\n"))
134 trace_inject_msi(vgpu
->id
, addr
, data
);
136 ret
= intel_gvt_host
.mpt
->inject_msi(vgpu
->handle
, addr
, data
);
143 * intel_gvt_hypervisor_set_wp_page - translate a host VA into MFN
144 * @p: host kernel virtual address
147 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
149 static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p
)
151 return intel_gvt_host
.mpt
->from_virt_to_mfn(p
);
155 * intel_gvt_hypervisor_enable_page_track - track a guest page
157 * @gfn: the gfn of guest
160 * Zero on success, negative error code if failed.
162 static inline int intel_gvt_hypervisor_enable_page_track(
163 struct intel_vgpu
*vgpu
, unsigned long gfn
)
165 return intel_gvt_host
.mpt
->enable_page_track(vgpu
->handle
, gfn
);
169 * intel_gvt_hypervisor_disable_page_track - untrack a guest page
171 * @gfn: the gfn of guest
174 * Zero on success, negative error code if failed.
176 static inline int intel_gvt_hypervisor_disable_page_track(
177 struct intel_vgpu
*vgpu
, unsigned long gfn
)
179 return intel_gvt_host
.mpt
->disable_page_track(vgpu
->handle
, gfn
);
183 * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
185 * @gpa: guest physical address
186 * @buf: host data buffer
190 * Zero on success, negative error code if failed.
192 static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu
*vgpu
,
193 unsigned long gpa
, void *buf
, unsigned long len
)
195 return intel_gvt_host
.mpt
->read_gpa(vgpu
->handle
, gpa
, buf
, len
);
199 * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
201 * @gpa: guest physical address
202 * @buf: host data buffer
206 * Zero on success, negative error code if failed.
208 static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu
*vgpu
,
209 unsigned long gpa
, void *buf
, unsigned long len
)
211 return intel_gvt_host
.mpt
->write_gpa(vgpu
->handle
, gpa
, buf
, len
);
215 * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
220 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
222 static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
223 struct intel_vgpu
*vgpu
, unsigned long gfn
)
225 return intel_gvt_host
.mpt
->gfn_to_mfn(vgpu
->handle
, gfn
);
229 * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
233 * @dma_addr: retrieve allocated dma addr
236 * 0 on success, negative error code if failed.
238 static inline int intel_gvt_hypervisor_dma_map_guest_page(
239 struct intel_vgpu
*vgpu
, unsigned long gfn
, unsigned long size
,
240 dma_addr_t
*dma_addr
)
242 return intel_gvt_host
.mpt
->dma_map_guest_page(vgpu
->handle
, gfn
, size
,
247 * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page
249 * @dma_addr: the mapped dma addr
251 static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
252 struct intel_vgpu
*vgpu
, dma_addr_t dma_addr
)
254 intel_gvt_host
.mpt
->dma_unmap_guest_page(vgpu
->handle
, dma_addr
);
258 * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
260 * @dma_addr: guest dma addr
263 * 0 on success, negative error code if failed.
266 intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu
*vgpu
,
269 return intel_gvt_host
.mpt
->dma_pin_guest_page(vgpu
->handle
, dma_addr
);
273 * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
277 * @nr: amount of PFNs
281 * Zero on success, negative error code if failed.
283 static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
284 struct intel_vgpu
*vgpu
, unsigned long gfn
,
285 unsigned long mfn
, unsigned int nr
,
288 /* a MPT implementation could have MMIO mapped elsewhere */
289 if (!intel_gvt_host
.mpt
->map_gfn_to_mfn
)
292 return intel_gvt_host
.mpt
->map_gfn_to_mfn(vgpu
->handle
, gfn
, mfn
, nr
,
297 * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
299 * @start: the beginning of the guest physical address region
300 * @end: the end of the guest physical address region
304 * Zero on success, negative error code if failed.
306 static inline int intel_gvt_hypervisor_set_trap_area(
307 struct intel_vgpu
*vgpu
, u64 start
, u64 end
, bool map
)
309 /* a MPT implementation could have MMIO trapped elsewhere */
310 if (!intel_gvt_host
.mpt
->set_trap_area
)
313 return intel_gvt_host
.mpt
->set_trap_area(vgpu
->handle
, start
, end
, map
);
317 * intel_gvt_hypervisor_set_opregion - Set opregion for guest
321 * Zero on success, negative error code if failed.
323 static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu
*vgpu
)
325 if (!intel_gvt_host
.mpt
->set_opregion
)
328 return intel_gvt_host
.mpt
->set_opregion(vgpu
);
332 * intel_gvt_hypervisor_set_edid - Set EDID region for guest
334 * @port_num: display port number
337 * Zero on success, negative error code if failed.
339 static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu
*vgpu
,
342 if (!intel_gvt_host
.mpt
->set_edid
)
345 return intel_gvt_host
.mpt
->set_edid(vgpu
, port_num
);
349 * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
353 * Zero on success, negative error code if failed.
355 static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu
*vgpu
)
357 if (!intel_gvt_host
.mpt
->get_vfio_device
)
360 return intel_gvt_host
.mpt
->get_vfio_device(vgpu
);
364 * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
368 * Zero on success, negative error code if failed.
370 static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu
*vgpu
)
372 if (!intel_gvt_host
.mpt
->put_vfio_device
)
375 intel_gvt_host
.mpt
->put_vfio_device(vgpu
);
379 * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn
384 * true on valid gfn, false on not.
386 static inline bool intel_gvt_hypervisor_is_valid_gfn(
387 struct intel_vgpu
*vgpu
, unsigned long gfn
)
389 if (!intel_gvt_host
.mpt
->is_valid_gfn
)
392 return intel_gvt_host
.mpt
->is_valid_gfn(vgpu
->handle
, gfn
);
395 int intel_gvt_register_hypervisor(const struct intel_gvt_mpt
*);
396 void intel_gvt_unregister_hypervisor(void);
398 #endif /* _GVT_MPT_H_ */