drivers/gpu/drm/i915/gvt/gvt.c

/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 */

#include <linux/types.h>
#include <linux/kthread.h>

#include "i915_drv.h"
#include "intel_gvt.h"
#include "gvt.h"
#include <linux/vfio.h>
#include <linux/mdev.h>
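
/*
 * Host-side GVT-g state: the device pointer and "initialized" flag are set
 * at the end of intel_gvt_init_device(); the MPT hooks and hypervisor type
 * are filled in later by intel_gvt_register_hypervisor().
 */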
struct intel_gvt_host intel_gvt_host;

static const char * const supported_hypervisors[] = {
        [INTEL_GVT_HYPERVISOR_XEN] = "XEN",
        [INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
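
/*
 * mdev type kobjects are named "<drm driver name>-<vGPU type name>", so the
 * lookup below skips the driver-name prefix (plus the '-' separator) before
 * comparing against the GVT-g type table.
 */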
static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
                const char *name)
{
        const char *driver_name =
                dev_driver_string(&gvt->gt->i915->drm.pdev->dev);
        int i;

        name += strlen(driver_name) + 1;
        for (i = 0; i < gvt->num_types; i++) {
                struct intel_vgpu_type *t = &gvt->types[i];

                if (!strncmp(t->name, name, sizeof(t->name)))
                        return t;
        }

        return NULL;
}
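
/*
 * Show routines for the per-type mdev sysfs attributes (available_instances,
 * device_api and description) exposed under each type's directory in
 * mdev_supported_types.
 */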
static ssize_t available_instances_show(struct kobject *kobj,
                                        struct device *dev, char *buf)
{
        struct intel_vgpu_type *type;
        unsigned int num = 0;
        void *gvt = kdev_to_i915(dev)->gvt;

        type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
        if (!type)
                num = 0;
        else
                num = type->avail_instance;

        return sprintf(buf, "%u\n", num);
}

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
                char *buf)
{
        return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}

static ssize_t description_show(struct kobject *kobj, struct device *dev,
                char *buf)
{
        struct intel_vgpu_type *type;
        void *gvt = kdev_to_i915(dev)->gvt;

        type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
        if (!type)
                return 0;

        return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
                       "fence: %d\nresolution: %s\n"
                       "weight: %d\n",
                       BYTES_TO_MB(type->low_gm_size),
                       BYTES_TO_MB(type->high_gm_size),
                       type->fence, vgpu_edid_str(type->resolution),
                       type->weight);
}

static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);

static struct attribute *gvt_type_attrs[] = {
        &mdev_type_attr_available_instances.attr,
        &mdev_type_attr_device_api.attr,
        &mdev_type_attr_description.attr,
        NULL,
};

static struct attribute_group *gvt_vgpu_type_groups[] = {
        [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};
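
/*
 * Handed to the hypervisor backend through the get_gvt_attrs hook in
 * intel_gvt_ops so it can plug gvt_vgpu_type_groups into its mdev parent
 * device registration.
 */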
static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups)
{
        *intel_vgpu_type_groups = gvt_vgpu_type_groups;
        return true;
}
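
/*
 * Allocate one attribute_group per discovered vGPU type, named after the
 * type and all sharing gvt_type_attrs, so that each type gets its own
 * directory under mdev_supported_types.
 */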
static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
        int i, j;
        struct intel_vgpu_type *type;
        struct attribute_group *group;

        for (i = 0; i < gvt->num_types; i++) {
                type = &gvt->types[i];

                group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
                if (WARN_ON(!group))
                        goto unwind;

                group->name = type->name;
                group->attrs = gvt_type_attrs;
                gvt_vgpu_type_groups[i] = group;
        }

        return true;

unwind:
        for (j = 0; j < i; j++) {
                group = gvt_vgpu_type_groups[j];
                kfree(group);
        }

        return false;
}

static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
{
        int i;
        struct attribute_group *group;

        for (i = 0; i < gvt->num_types; i++) {
                group = gvt_vgpu_type_groups[i];
                gvt_vgpu_type_groups[i] = NULL;
                kfree(group);
        }
}
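
/*
 * Callbacks the hypervisor backend uses to drive the device model; the table
 * is handed over via intel_gvt_hypervisor_host_init() in
 * intel_gvt_register_hypervisor() below.
 */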
static const struct intel_gvt_ops intel_gvt_ops = {
        .emulate_cfg_read = intel_vgpu_emulate_cfg_read,
        .emulate_cfg_write = intel_vgpu_emulate_cfg_write,
        .emulate_mmio_read = intel_vgpu_emulate_mmio_read,
        .emulate_mmio_write = intel_vgpu_emulate_mmio_write,
        .vgpu_create = intel_gvt_create_vgpu,
        .vgpu_destroy = intel_gvt_destroy_vgpu,
        .vgpu_release = intel_gvt_release_vgpu,
        .vgpu_reset = intel_gvt_reset_vgpu,
        .vgpu_activate = intel_gvt_activate_vgpu,
        .vgpu_deactivate = intel_gvt_deactivate_vgpu,
        .gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
        .get_gvt_attrs = intel_get_gvt_attrs,
        .vgpu_query_plane = intel_vgpu_query_plane,
        .vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
        .write_protect_handler = intel_vgpu_page_track_handler,
        .emulate_hotplug = intel_vgpu_emulate_hotplug,
};
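
/*
 * Static, device-wide parameters of the mediation model: emulated config
 * space and MMIO sizes, GTT entry layout, and the MSI capability offset
 * taken from the physical GPU's PCI config space.
 */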
static void init_device_info(struct intel_gvt *gvt)
{
        struct intel_gvt_device_info *info = &gvt->device_info;
        struct pci_dev *pdev = gvt->gt->i915->drm.pdev;

        info->max_support_vgpus = 8;
        info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
        info->mmio_size = 2 * 1024 * 1024;
        info->mmio_bar = 0;
        info->gtt_start_offset = 8 * 1024 * 1024;
        info->gtt_entry_size = 8;
        info->gtt_entry_size_shift = 3;
        info->gmadr_bytes_in_cmd = 8;
        info->max_surface_size = 36 * 1024 * 1024;
        info->msi_cap_offset = pdev->msi_cap;
}
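
/*
 * Deferred-work loop for GVT-g: requesters set a bit in gvt->service_request
 * and wake service_thread_wq (see intel_gvt_request_service() in gvt.h); the
 * thread then emulates vGPU vblanks and kicks the vGPU scheduler.
 */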
static int gvt_service_thread(void *data)
{
        struct intel_gvt *gvt = (struct intel_gvt *)data;
        int ret;

        gvt_dbg_core("service thread start\n");

        while (!kthread_should_stop()) {
                ret = wait_event_interruptible(gvt->service_thread_wq,
                                kthread_should_stop() || gvt->service_request);

                if (kthread_should_stop())
                        break;

                if (WARN_ONCE(ret, "service thread was woken up by a signal\n"))
                        continue;

                if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
                                        (void *)&gvt->service_request))
                        intel_gvt_emulate_vblank(gvt);

                if (test_bit(INTEL_GVT_REQUEST_SCHED,
                                (void *)&gvt->service_request) ||
                        test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
                                        (void *)&gvt->service_request)) {
                        intel_gvt_schedule(gvt);
                }
        }

        return 0;
}

static void clean_service_thread(struct intel_gvt *gvt)
{
        kthread_stop(gvt->service_thread);
}

static int init_service_thread(struct intel_gvt *gvt)
{
        init_waitqueue_head(&gvt->service_thread_wq);

        gvt->service_thread = kthread_run(gvt_service_thread,
                        gvt, "gvt_service_thread");
        if (IS_ERR(gvt->service_thread)) {
                gvt_err("failed to start service thread\n");
                return PTR_ERR(gvt->service_thread);
        }
        return 0;
}

/**
 * intel_gvt_clean_device - clean a GVT device
 * @i915: i915 private
 *
 * This function is called at the driver unloading stage, to free the
 * resources owned by a GVT device.
 */
void intel_gvt_clean_device(struct drm_i915_private *i915)
{
        struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);

        if (drm_WARN_ON(&i915->drm, !gvt))
                return;

        intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
        intel_gvt_cleanup_vgpu_type_groups(gvt);
        intel_gvt_clean_vgpu_types(gvt);

        intel_gvt_debugfs_clean(gvt);
        clean_service_thread(gvt);
        intel_gvt_clean_cmd_parser(gvt);
        intel_gvt_clean_sched_policy(gvt);
        intel_gvt_clean_workload_scheduler(gvt);
        intel_gvt_clean_gtt(gvt);
        intel_gvt_clean_irq(gvt);
        intel_gvt_free_firmware(gvt);
        intel_gvt_clean_mmio_info(gvt);
        idr_destroy(&gvt->vgpu_idr);

        /*
         * i915->gvt was already cleared by fetch_and_zero() above; free the
         * saved pointer rather than the (now NULL) field.
         */
        kfree(gvt);
}
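
/*
 * The setup below and the teardown above are paired: intel_gvt_clean_device()
 * unwinds in (largely) the reverse order of intel_gvt_init_device(). Both are
 * expected to be called from the i915 core during driver load and unload
 * (see i915's intel_gvt.c).
 */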

/**
 * intel_gvt_init_device - initialize a GVT device
 * @i915: drm i915 private data
 *
 * This function is called at the initialization stage, to initialize
 * necessary GVT components.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_gvt_init_device(struct drm_i915_private *i915)
{
        struct intel_gvt *gvt;
        struct intel_vgpu *vgpu;
        int ret;

        if (drm_WARN_ON(&i915->drm, i915->gvt))
                return -EEXIST;

        gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
        if (!gvt)
                return -ENOMEM;

        gvt_dbg_core("init gvt device\n");

        idr_init_base(&gvt->vgpu_idr, 1);
        spin_lock_init(&gvt->scheduler.mmio_context_lock);
        mutex_init(&gvt->lock);
        mutex_init(&gvt->sched_lock);
        gvt->gt = &i915->gt;
        i915->gvt = gvt;

        init_device_info(gvt);

        ret = intel_gvt_setup_mmio_info(gvt);
        if (ret)
                goto out_clean_idr;

        intel_gvt_init_engine_mmio_context(gvt);

        ret = intel_gvt_load_firmware(gvt);
        if (ret)
                goto out_clean_mmio_info;

        ret = intel_gvt_init_irq(gvt);
        if (ret)
                goto out_free_firmware;

        ret = intel_gvt_init_gtt(gvt);
        if (ret)
                goto out_clean_irq;

        ret = intel_gvt_init_workload_scheduler(gvt);
        if (ret)
                goto out_clean_gtt;

        ret = intel_gvt_init_sched_policy(gvt);
        if (ret)
                goto out_clean_workload_scheduler;

        ret = intel_gvt_init_cmd_parser(gvt);
        if (ret)
                goto out_clean_sched_policy;

        ret = init_service_thread(gvt);
        if (ret)
                goto out_clean_cmd_parser;

        ret = intel_gvt_init_vgpu_types(gvt);
        if (ret)
                goto out_clean_thread;

        /*
         * intel_gvt_init_vgpu_type_groups() returns bool; on failure (group
         * allocation failed) report -ENOMEM instead of silently returning 0.
         */
        if (!intel_gvt_init_vgpu_type_groups(gvt)) {
                ret = -ENOMEM;
                gvt_err("failed to init vgpu type groups\n");
                goto out_clean_types;
        }

        vgpu = intel_gvt_create_idle_vgpu(gvt);
        if (IS_ERR(vgpu)) {
                ret = PTR_ERR(vgpu);
                gvt_err("failed to create idle vgpu\n");
                goto out_clean_types;
        }
        gvt->idle_vgpu = vgpu;

        intel_gvt_debugfs_init(gvt);

        gvt_dbg_core("gvt device initialization is done\n");
        intel_gvt_host.dev = &i915->drm.pdev->dev;
        intel_gvt_host.initialized = true;
        return 0;

out_clean_types:
        intel_gvt_clean_vgpu_types(gvt);
out_clean_thread:
        clean_service_thread(gvt);
out_clean_cmd_parser:
        intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
        intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
        intel_gvt_clean_workload_scheduler(gvt);
out_clean_gtt:
        intel_gvt_clean_gtt(gvt);
out_clean_irq:
        intel_gvt_clean_irq(gvt);
out_free_firmware:
        intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
        intel_gvt_clean_mmio_info(gvt);
out_clean_idr:
        idr_destroy(&gvt->vgpu_idr);
        kfree(gvt);
        i915->gvt = NULL;
        return ret;
}
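
/*
 * Restore GVT-g hardware state that does not survive system suspend: fence
 * registers, saved vGPU MMIO and GGTT entries. Expected to be called from
 * the i915 resume path (via intel_gvt_resume()).
 */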
int
intel_gvt_pm_resume(struct intel_gvt *gvt)
{
        intel_gvt_restore_fence(gvt);
        intel_gvt_restore_mmio(gvt);
        intel_gvt_restore_ggtt(gvt);
        return 0;
}
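
/*
 * Illustrative registration from a hypervisor backend module (a sketch, not
 * part of this file; kvmgt is the in-tree user and its MPT table is assumed
 * to look roughly like this):
 *
 *      static const struct intel_gvt_mpt kvmgt_mpt = {
 *              .type = INTEL_GVT_HYPERVISOR_KVM,
 *              .host_init = kvmgt_host_init,
 *              ...
 *      };
 *
 *      ret = intel_gvt_register_hypervisor(&kvmgt_mpt);
 */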
int
intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m)
{
        int ret;
        void *gvt;

        if (!intel_gvt_host.initialized)
                return -ENODEV;

        if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
            m->type != INTEL_GVT_HYPERVISOR_XEN)
                return -EINVAL;

        /* Get a reference for device model module */
        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        intel_gvt_host.mpt = m;
        intel_gvt_host.hypervisor_type = m->type;
        gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;

        ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
                                             &intel_gvt_ops);
        if (ret < 0) {
                gvt_err("Failed to init %s hypervisor module\n",
                        supported_hypervisors[intel_gvt_host.hypervisor_type]);
                module_put(THIS_MODULE);
                return -ENODEV;
        }

        gvt_dbg_core("Running with hypervisor %s in host mode\n",
                        supported_hypervisors[intel_gvt_host.hypervisor_type]);
        return 0;
}
EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);

void
intel_gvt_unregister_hypervisor(void)
{
        intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
        module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);