Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[cris-mirror.git] / drivers / gpu / drm / i915 / gvt / mpt.h
blob81aff4eacbfeb6732808cd2eca9b5ed426e575db
1 /*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
23 * Authors:
24 * Eddie Dong <eddie.dong@intel.com>
25 * Dexuan Cui
26 * Jike Song <jike.song@intel.com>
28 * Contributors:
29 * Zhi Wang <zhi.a.wang@intel.com>
33 #ifndef _GVT_MPT_H_
34 #define _GVT_MPT_H_
/**
 * DOC: Hypervisor Service APIs for GVT-g Core Logic
 *
 * This is the glue layer between specific hypervisor MPT modules and GVT-g core
 * logic. Each kind of hypervisor MPT module provides a collection of function
 * callbacks and will be attached to GVT host when the driver is loading.
 * GVT-g core logic will call these APIs to request specific services from
 * hypervisor.
 */
46 /**
47 * intel_gvt_hypervisor_host_init - init GVT-g host side
49 * Returns:
50 * Zero on success, negative error code if failed
52 static inline int intel_gvt_hypervisor_host_init(struct device *dev,
53 void *gvt, const void *ops)
55 /* optional to provide */
56 if (!intel_gvt_host.mpt->host_init)
57 return 0;
59 return intel_gvt_host.mpt->host_init(dev, gvt, ops);
62 /**
63 * intel_gvt_hypervisor_host_exit - exit GVT-g host side
65 static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
66 void *gvt)
68 /* optional to provide */
69 if (!intel_gvt_host.mpt->host_exit)
70 return;
72 intel_gvt_host.mpt->host_exit(dev, gvt);
75 /**
76 * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
77 * related stuffs inside hypervisor.
79 * Returns:
80 * Zero on success, negative error code if failed.
82 static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
84 /* optional to provide */
85 if (!intel_gvt_host.mpt->attach_vgpu)
86 return 0;
88 return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
91 /**
92 * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU
93 * related stuffs inside hypervisor.
95 * Returns:
96 * Zero on success, negative error code if failed.
98 static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
100 /* optional to provide */
101 if (!intel_gvt_host.mpt->detach_vgpu)
102 return;
104 intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
/*
 * MSI capability register offsets, relative to the MSI capability base
 * in PCI config space. Arguments are parenthesized so the macros stay
 * correct when called with compound expressions (CERT PRE01-C).
 */
#define MSI_CAP_CONTROL(offset)	((offset) + 2)
#define MSI_CAP_ADDRESS(offset)	((offset) + 4)
#define MSI_CAP_DATA(offset)	((offset) + 8)
#define MSI_CAP_EN 0x1
113 * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU
115 * Returns:
116 * Zero on success, negative error code if failed.
118 static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
120 unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
121 u16 control, data;
122 u32 addr;
123 int ret;
125 control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
126 addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
127 data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
129 /* Do not generate MSI if MSIEN is disable */
130 if (!(control & MSI_CAP_EN))
131 return 0;
133 if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
134 return -EINVAL;
136 trace_inject_msi(vgpu->id, addr, data);
138 ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
139 if (ret)
140 return ret;
141 return 0;
145 * intel_gvt_hypervisor_set_wp_page - translate a host VA into MFN
146 * @p: host kernel virtual address
148 * Returns:
149 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
151 static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
153 return intel_gvt_host.mpt->from_virt_to_mfn(p);
157 * intel_gvt_hypervisor_enable - set a guest page to write-protected
158 * @vgpu: a vGPU
159 * @t: page track data structure
161 * Returns:
162 * Zero on success, negative error code if failed.
164 static inline int intel_gvt_hypervisor_enable_page_track(
165 struct intel_vgpu *vgpu,
166 struct intel_vgpu_page_track *t)
168 int ret;
170 if (t->tracked)
171 return 0;
173 ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, t->gfn);
174 if (ret)
175 return ret;
176 t->tracked = true;
177 atomic_inc(&vgpu->gtt.n_tracked_guest_page);
178 return 0;
182 * intel_gvt_hypervisor_disable_page_track - remove the write-protection of a
183 * guest page
184 * @vgpu: a vGPU
185 * @t: page track data structure
187 * Returns:
188 * Zero on success, negative error code if failed.
190 static inline int intel_gvt_hypervisor_disable_page_track(
191 struct intel_vgpu *vgpu,
192 struct intel_vgpu_page_track *t)
194 int ret;
196 if (!t->tracked)
197 return 0;
199 ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, t->gfn);
200 if (ret)
201 return ret;
202 t->tracked = false;
203 atomic_dec(&vgpu->gtt.n_tracked_guest_page);
204 return 0;
208 * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
209 * @vgpu: a vGPU
210 * @gpa: guest physical address
211 * @buf: host data buffer
212 * @len: data length
214 * Returns:
215 * Zero on success, negative error code if failed.
217 static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
218 unsigned long gpa, void *buf, unsigned long len)
220 return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
224 * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
225 * @vgpu: a vGPU
226 * @gpa: guest physical address
227 * @buf: host data buffer
228 * @len: data length
230 * Returns:
231 * Zero on success, negative error code if failed.
233 static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
234 unsigned long gpa, void *buf, unsigned long len)
236 return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
240 * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
241 * @vgpu: a vGPU
242 * @gpfn: guest pfn
244 * Returns:
245 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
247 static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
248 struct intel_vgpu *vgpu, unsigned long gfn)
250 return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
254 * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
255 * @vgpu: a vGPU
256 * @gfn: guest PFN
257 * @mfn: host PFN
258 * @nr: amount of PFNs
259 * @map: map or unmap
261 * Returns:
262 * Zero on success, negative error code if failed.
264 static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
265 struct intel_vgpu *vgpu, unsigned long gfn,
266 unsigned long mfn, unsigned int nr,
267 bool map)
269 /* a MPT implementation could have MMIO mapped elsewhere */
270 if (!intel_gvt_host.mpt->map_gfn_to_mfn)
271 return 0;
273 return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
274 map);
278 * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
279 * @vgpu: a vGPU
280 * @start: the beginning of the guest physical address region
281 * @end: the end of the guest physical address region
282 * @map: map or unmap
284 * Returns:
285 * Zero on success, negative error code if failed.
287 static inline int intel_gvt_hypervisor_set_trap_area(
288 struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
290 /* a MPT implementation could have MMIO trapped elsewhere */
291 if (!intel_gvt_host.mpt->set_trap_area)
292 return 0;
294 return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
298 * intel_gvt_hypervisor_set_opregion - Set opregion for guest
299 * @vgpu: a vGPU
301 * Returns:
302 * Zero on success, negative error code if failed.
304 static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
306 if (!intel_gvt_host.mpt->set_opregion)
307 return 0;
309 return intel_gvt_host.mpt->set_opregion(vgpu);
313 * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
314 * @vgpu: a vGPU
316 * Returns:
317 * Zero on success, negative error code if failed.
319 static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
321 if (!intel_gvt_host.mpt->get_vfio_device)
322 return 0;
324 return intel_gvt_host.mpt->get_vfio_device(vgpu);
328 * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
329 * @vgpu: a vGPU
331 * Returns:
332 * Zero on success, negative error code if failed.
334 static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
336 if (!intel_gvt_host.mpt->put_vfio_device)
337 return;
339 intel_gvt_host.mpt->put_vfio_device(vgpu);
343 * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn
344 * @vgpu: a vGPU
345 * @gfn: guest PFN
347 * Returns:
348 * true on valid gfn, false on not.
350 static inline bool intel_gvt_hypervisor_is_valid_gfn(
351 struct intel_vgpu *vgpu, unsigned long gfn)
353 if (!intel_gvt_host.mpt->is_valid_gfn)
354 return true;
356 return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
359 #endif /* _GVT_MPT_H_ */