vfio-mdev: Make mdev_device private and abstract interfaces
drivers/gpu/drm/i915/gvt/kvmgt.c
1 /*
2 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
4 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
25 * Authors:
26 * Kevin Tian <kevin.tian@intel.com>
27 * Jike Song <jike.song@intel.com>
28 * Xiaoguang Chen <xiaoguang.chen@intel.com>
31 #include <linux/init.h>
32 #include <linux/device.h>
33 #include <linux/mm.h>
34 #include <linux/mmu_context.h>
35 #include <linux/types.h>
36 #include <linux/list.h>
37 #include <linux/rbtree.h>
38 #include <linux/spinlock.h>
39 #include <linux/eventfd.h>
40 #include <linux/uuid.h>
41 #include <linux/kvm_host.h>
42 #include <linux/vfio.h>
43 #include <linux/mdev.h>
45 #include "i915_drv.h"
46 #include "gvt.h"
48 static const struct intel_gvt_ops *intel_gvt_ops;
50 /* helper macros copied from vfio-pci */
51 #define VFIO_PCI_OFFSET_SHIFT 40
52 #define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
53 #define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
54 #define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
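/*
 * Each VFIO region is addressed through a single file offset: the region
 * index lives above bit 40 and the offset within the region in the low
 * 40 bits.  As an illustration (not part of the original source), an access
 * at
 *     *ppos = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_CONFIG_REGION_INDEX) + 0x10
 * decodes back to index == VFIO_PCI_CONFIG_REGION_INDEX and pos == 0x10 in
 * intel_vgpu_rw() below.
 */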
56 struct vfio_region {
57 u32 type;
58 u32 subtype;
59 size_t size;
60 u32 flags;
63 struct kvmgt_pgfn {
64 gfn_t gfn;
65 struct hlist_node hnode;
68 struct kvmgt_guest_info {
69 struct kvm *kvm;
70 struct intel_vgpu *vgpu;
71 struct kvm_page_track_notifier_node track_node;
72 #define NR_BKT (1 << 18)
73 struct hlist_head ptable[NR_BKT];
74 #undef NR_BKT
77 struct gvt_dma {
78 struct rb_node node;
79 gfn_t gfn;
80 kvm_pfn_t pfn;
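/*
 * vgpu->handle holds a pointer to the per-guest kvmgt_guest_info once
 * kvmgt_guest_init() has run; any value that fits in the low byte is
 * treated as "no guest attached yet".
 */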
83 static inline bool handle_valid(unsigned long handle)
85 return !!(handle & ~0xff);
88 static int kvmgt_guest_init(struct mdev_device *mdev);
89 static void intel_vgpu_release_work(struct work_struct *work);
90 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
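/*
 * gfn -> pfn cache: an rb-tree keyed by guest frame number that remembers
 * pages already pinned through vfio_pin_pages(), so repeated translations
 * of the same gfn do not pin the page again.  Protected by vdev.cache_lock.
 */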
92 static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
94 struct rb_node *node = vgpu->vdev.cache.rb_node;
95 struct gvt_dma *ret = NULL;
97 while (node) {
98 struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);
100 if (gfn < itr->gfn)
101 node = node->rb_left;
102 else if (gfn > itr->gfn)
103 node = node->rb_right;
104 else {
105 ret = itr;
106 goto out;
110 out:
111 return ret;
114 static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
116 struct gvt_dma *entry;
118 mutex_lock(&vgpu->vdev.cache_lock);
119 entry = __gvt_cache_find(vgpu, gfn);
120 mutex_unlock(&vgpu->vdev.cache_lock);
122 return entry == NULL ? 0 : entry->pfn;
125 static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
127 struct gvt_dma *new, *itr;
128 struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
130 new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
131 if (!new)
132 return;
134 new->gfn = gfn;
135 new->pfn = pfn;
137 mutex_lock(&vgpu->vdev.cache_lock);
138 while (*link) {
139 parent = *link;
140 itr = rb_entry(parent, struct gvt_dma, node);
142 if (gfn == itr->gfn)
143 goto out;
144 else if (gfn < itr->gfn)
145 link = &parent->rb_left;
146 else
147 link = &parent->rb_right;
150 rb_link_node(&new->node, parent, link);
151 rb_insert_color(&new->node, &vgpu->vdev.cache);
152 mutex_unlock(&vgpu->vdev.cache_lock);
153 return;
155 out:
156 mutex_unlock(&vgpu->vdev.cache_lock);
157 kfree(new);
160 static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
161 struct gvt_dma *entry)
163 rb_erase(&entry->node, &vgpu->vdev.cache);
164 kfree(entry);
167 static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
169 struct device *dev = mdev_dev(vgpu->vdev.mdev);
170 struct gvt_dma *this;
171 unsigned long g1;
172 int rc;
174 mutex_lock(&vgpu->vdev.cache_lock);
175 this = __gvt_cache_find(vgpu, gfn);
176 if (!this) {
177 mutex_unlock(&vgpu->vdev.cache_lock);
178 return;
181 g1 = gfn;
182 rc = vfio_unpin_pages(dev, &g1, 1);
183 WARN_ON(rc != 1);
184 __gvt_cache_remove_entry(vgpu, this);
185 mutex_unlock(&vgpu->vdev.cache_lock);
188 static void gvt_cache_init(struct intel_vgpu *vgpu)
190 vgpu->vdev.cache = RB_ROOT;
191 mutex_init(&vgpu->vdev.cache_lock);
194 static void gvt_cache_destroy(struct intel_vgpu *vgpu)
196 struct gvt_dma *dma;
197 struct rb_node *node = NULL;
198 struct device *dev = mdev_dev(vgpu->vdev.mdev);
199 unsigned long gfn;
201 mutex_lock(&vgpu->vdev.cache_lock);
202 while ((node = rb_first(&vgpu->vdev.cache))) {
203 dma = rb_entry(node, struct gvt_dma, node);
204 gfn = dma->gfn;
206 vfio_unpin_pages(dev, &gfn, 1);
207 __gvt_cache_remove_entry(vgpu, dma);
209 mutex_unlock(&vgpu->vdev.cache_lock);
212 static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
213 const char *name)
215 int i;
216 struct intel_vgpu_type *t;
217 const char *driver_name = dev_driver_string(
218 &gvt->dev_priv->drm.pdev->dev);
220 for (i = 0; i < gvt->num_types; i++) {
221 t = &gvt->types[i];
222 if (!strncmp(t->name, name + strlen(driver_name) + 1,
223 sizeof(t->name)))
224 return t;
227 return NULL;
230 static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
231 char *buf)
233 struct intel_vgpu_type *type;
234 unsigned int num = 0;
235 void *gvt = kdev_to_i915(dev)->gvt;
237 type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
238 if (!type)
239 num = 0;
240 else
241 num = type->avail_instance;
243 return sprintf(buf, "%u\n", num);
246 static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
247 char *buf)
249 return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
252 static ssize_t description_show(struct kobject *kobj, struct device *dev,
253 char *buf)
255 struct intel_vgpu_type *type;
256 void *gvt = kdev_to_i915(dev)->gvt;
258 type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
259 if (!type)
260 return 0;
262 return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
263 "fence: %d\n",
264 BYTES_TO_MB(type->low_gm_size),
265 BYTES_TO_MB(type->high_gm_size),
266 type->fence);
269 static MDEV_TYPE_ATTR_RO(available_instance);
270 static MDEV_TYPE_ATTR_RO(device_api);
271 static MDEV_TYPE_ATTR_RO(description);
273 static struct attribute *type_attrs[] = {
274 &mdev_type_attr_available_instance.attr,
275 &mdev_type_attr_device_api.attr,
276 &mdev_type_attr_description.attr,
277 NULL,
280 static struct attribute_group *intel_vgpu_type_groups[] = {
281 [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
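/*
 * Build one sysfs attribute group per supported vGPU type; the resulting
 * array is handed to the mdev core via intel_vgpu_ops.supported_type_groups
 * so each type is exposed under the parent device's supported-types
 * directory in sysfs.
 */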
284 static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
286 int i, j;
287 struct intel_vgpu_type *type;
288 struct attribute_group *group;
290 for (i = 0; i < gvt->num_types; i++) {
291 type = &gvt->types[i];
293 group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
294 if (WARN_ON(!group))
295 goto unwind;
297 group->name = type->name;
298 group->attrs = type_attrs;
299 intel_vgpu_type_groups[i] = group;
302 return true;
304 unwind:
305 for (j = 0; j < i; j++) {
306 group = intel_vgpu_type_groups[j];
307 kfree(group);
310 return false;
313 static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
315 int i;
316 struct attribute_group *group;
318 for (i = 0; i < gvt->num_types; i++) {
319 group = intel_vgpu_type_groups[i];
320 kfree(group);
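/*
 * Per-guest table of write-protected gfns, kept in a hash table so the KVM
 * page-track callbacks below can quickly tell whether a faulting write
 * should be forwarded to GVT's MMIO emulation.
 */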
324 static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
326 hash_init(info->ptable);
329 static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
331 struct kvmgt_pgfn *p;
332 struct hlist_node *tmp;
333 int i;
335 hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
336 hash_del(&p->hnode);
337 kfree(p);
341 static struct kvmgt_pgfn *
342 __kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
344 struct kvmgt_pgfn *p, *res = NULL;
346 hash_for_each_possible(info->ptable, p, hnode, gfn) {
347 if (gfn == p->gfn) {
348 res = p;
349 break;
353 return res;
356 static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
357 gfn_t gfn)
359 struct kvmgt_pgfn *p;
361 p = __kvmgt_protect_table_find(info, gfn);
362 return !!p;
365 static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
367 struct kvmgt_pgfn *p;
369 if (kvmgt_gfn_is_write_protected(info, gfn))
370 return;
372 p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
373 if (WARN(!p, "gfn: 0x%llx\n", gfn))
374 return;
376 p->gfn = gfn;
377 hash_add(info->ptable, &p->hnode, gfn);
380 static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
381 gfn_t gfn)
383 struct kvmgt_pgfn *p;
385 p = __kvmgt_protect_table_find(info, gfn);
386 if (p) {
387 hash_del(&p->hnode);
388 kfree(p);
392 static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
394 struct intel_vgpu *vgpu;
395 struct intel_vgpu_type *type;
396 struct device *pdev;
397 void *gvt;
399 pdev = mdev_parent_dev(mdev);
400 gvt = kdev_to_i915(pdev)->gvt;
402 type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
403 if (!type) {
404 gvt_err("failed to find type %s to create\n",
405 kobject_name(kobj));
406 return -EINVAL;
409 vgpu = intel_gvt_ops->vgpu_create(gvt, type);
410 if (IS_ERR_OR_NULL(vgpu)) {
411 gvt_err("create intel vgpu failed\n");
412 return -EINVAL;
415 INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
417 vgpu->vdev.mdev = mdev;
418 mdev_set_drvdata(mdev, vgpu);
420 gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
421 dev_name(mdev_dev(mdev)));
422 return 0;
425 static int intel_vgpu_remove(struct mdev_device *mdev)
427 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
429 if (handle_valid(vgpu->handle))
430 return -EBUSY;
432 intel_gvt_ops->vgpu_destroy(vgpu);
433 return 0;
436 static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
437 unsigned long action, void *data)
439 struct intel_vgpu *vgpu = container_of(nb,
440 struct intel_vgpu,
441 vdev.iommu_notifier);
443 if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
444 struct vfio_iommu_type1_dma_unmap *unmap = data;
445 unsigned long gfn, end_gfn;
447 gfn = unmap->iova >> PAGE_SHIFT;
448 end_gfn = gfn + unmap->size / PAGE_SIZE;
450 while (gfn < end_gfn)
451 gvt_cache_remove(vgpu, gfn++);
454 return NOTIFY_OK;
457 static int intel_vgpu_group_notifier(struct notifier_block *nb,
458 unsigned long action, void *data)
460 struct intel_vgpu *vgpu = container_of(nb,
461 struct intel_vgpu,
462 vdev.group_notifier);
464 /* the only action we care about */
465 if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
466 vgpu->vdev.kvm = data;
468 if (!data)
469 schedule_work(&vgpu->vdev.release_work);
472 return NOTIFY_OK;
475 static int intel_vgpu_open(struct mdev_device *mdev)
477 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
478 unsigned long events;
479 int ret;
481 vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
482 vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
484 events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
485 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
486 &vgpu->vdev.iommu_notifier);
487 if (ret != 0) {
488 gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
489 goto out;
492 events = VFIO_GROUP_NOTIFY_SET_KVM;
493 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
494 &vgpu->vdev.group_notifier);
495 if (ret != 0) {
496 gvt_err("vfio_register_notifier for group failed: %d\n", ret);
497 goto undo_iommu;
500 return kvmgt_guest_init(mdev);
502 undo_iommu:
503 vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
504 &vgpu->vdev.iommu_notifier);
505 out:
506 return ret;
509 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
511 struct kvmgt_guest_info *info;
513 if (!handle_valid(vgpu->handle))
514 return;
516 vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
517 &vgpu->vdev.iommu_notifier);
518 vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
519 &vgpu->vdev.group_notifier);
521 info = (struct kvmgt_guest_info *)vgpu->handle;
522 kvmgt_guest_exit(info);
523 vgpu->handle = 0;
526 static void intel_vgpu_release(struct mdev_device *mdev)
528 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
530 __intel_vgpu_release(vgpu);
533 static void intel_vgpu_release_work(struct work_struct *work)
535 struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
536 vdev.release_work);
537 __intel_vgpu_release(vgpu);
540 static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
542 u32 start_lo, start_hi;
543 u32 mem_type;
544 int pos = PCI_BASE_ADDRESS_0;
546 start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
547 PCI_BASE_ADDRESS_MEM_MASK;
548 mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
549 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
551 switch (mem_type) {
552 case PCI_BASE_ADDRESS_MEM_TYPE_64:
553 start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
554 + pos + 4));
555 break;
556 case PCI_BASE_ADDRESS_MEM_TYPE_32:
557 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
558 /* 1M mem BAR treated as 32-bit BAR */
559 default:
560 /* mem unknown type treated as 32-bit BAR */
561 start_hi = 0;
562 break;
565 return ((u64)start_hi << 32) | start_lo;
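/*
 * Common backend for read/write: decode the region index from *ppos,
 * dispatch config-space accesses to emulate_cfg_read/write and BAR0/BAR1
 * accesses to MMIO emulation at bar0_start + pos; the remaining regions are
 * not supported.
 */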
568 static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
569 size_t count, loff_t *ppos, bool is_write)
571 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
572 unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
573 uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
574 int ret = -EINVAL;
577 if (index >= VFIO_PCI_NUM_REGIONS) {
578 gvt_err("invalid index: %u\n", index);
579 return -EINVAL;
582 switch (index) {
583 case VFIO_PCI_CONFIG_REGION_INDEX:
584 if (is_write)
585 ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
586 buf, count);
587 else
588 ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
589 buf, count);
590 break;
591 case VFIO_PCI_BAR0_REGION_INDEX:
592 case VFIO_PCI_BAR1_REGION_INDEX:
593 if (is_write) {
594 uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
596 ret = intel_gvt_ops->emulate_mmio_write(vgpu,
597 bar0_start + pos, buf, count);
598 } else {
599 uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
601 ret = intel_gvt_ops->emulate_mmio_read(vgpu,
602 bar0_start + pos, buf, count);
604 break;
605 case VFIO_PCI_BAR2_REGION_INDEX:
606 case VFIO_PCI_BAR3_REGION_INDEX:
607 case VFIO_PCI_BAR4_REGION_INDEX:
608 case VFIO_PCI_BAR5_REGION_INDEX:
609 case VFIO_PCI_VGA_REGION_INDEX:
610 case VFIO_PCI_ROM_REGION_INDEX:
611 default:
612 gvt_err("unsupported region: %u\n", index);
615 return ret == 0 ? count : ret;
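/*
 * read()/write() split the user buffer into naturally aligned 4-, 2- or
 * 1-byte chunks, forward each chunk to intel_vgpu_rw() and copy the data
 * to/from user space, advancing *ppos as they go.
 */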
618 static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
619 size_t count, loff_t *ppos)
621 unsigned int done = 0;
622 int ret;
624 while (count) {
625 size_t filled;
627 if (count >= 4 && !(*ppos % 4)) {
628 u32 val;
630 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
631 ppos, false);
632 if (ret <= 0)
633 goto read_err;
635 if (copy_to_user(buf, &val, sizeof(val)))
636 goto read_err;
638 filled = 4;
639 } else if (count >= 2 && !(*ppos % 2)) {
640 u16 val;
642 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
643 ppos, false);
644 if (ret <= 0)
645 goto read_err;
647 if (copy_to_user(buf, &val, sizeof(val)))
648 goto read_err;
650 filled = 2;
651 } else {
652 u8 val;
654 ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
655 false);
656 if (ret <= 0)
657 goto read_err;
659 if (copy_to_user(buf, &val, sizeof(val)))
660 goto read_err;
662 filled = 1;
665 count -= filled;
666 done += filled;
667 *ppos += filled;
668 buf += filled;
671 return done;
673 read_err:
674 return -EFAULT;
677 static ssize_t intel_vgpu_write(struct mdev_device *mdev,
678 const char __user *buf,
679 size_t count, loff_t *ppos)
681 unsigned int done = 0;
682 int ret;
684 while (count) {
685 size_t filled;
687 if (count >= 4 && !(*ppos % 4)) {
688 u32 val;
690 if (copy_from_user(&val, buf, sizeof(val)))
691 goto write_err;
693 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
694 ppos, true);
695 if (ret <= 0)
696 goto write_err;
698 filled = 4;
699 } else if (count >= 2 && !(*ppos % 2)) {
700 u16 val;
702 if (copy_from_user(&val, buf, sizeof(val)))
703 goto write_err;
705 ret = intel_vgpu_rw(mdev, (char *)&val,
706 sizeof(val), ppos, true);
707 if (ret <= 0)
708 goto write_err;
710 filled = 2;
711 } else {
712 u8 val;
714 if (copy_from_user(&val, buf, sizeof(val)))
715 goto write_err;
717 ret = intel_vgpu_rw(mdev, &val, sizeof(val),
718 ppos, true);
719 if (ret <= 0)
720 goto write_err;
722 filled = 1;
725 count -= filled;
726 done += filled;
727 *ppos += filled;
728 buf += filled;
731 return done;
732 write_err:
733 return -EFAULT;
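/*
 * Only the BAR2 (graphics aperture) region may be mmapped, and only as a
 * shared mapping; the VMA is backed directly by the vGPU's slice of the
 * physical aperture via remap_pfn_range().
 */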
736 static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
738 unsigned int index;
739 u64 virtaddr;
740 unsigned long req_size, pgoff = 0;
741 pgprot_t pg_prot;
742 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
744 index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
745 if (index >= VFIO_PCI_ROM_REGION_INDEX)
746 return -EINVAL;
748 if (vma->vm_end < vma->vm_start)
749 return -EINVAL;
750 if ((vma->vm_flags & VM_SHARED) == 0)
751 return -EINVAL;
752 if (index != VFIO_PCI_BAR2_REGION_INDEX)
753 return -EINVAL;
755 pg_prot = vma->vm_page_prot;
756 virtaddr = vma->vm_start;
757 req_size = vma->vm_end - vma->vm_start;
758 pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
760 return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
763 static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
765 if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
766 return 1;
768 return 0;
771 static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
772 unsigned int index, unsigned int start,
773 unsigned int count, uint32_t flags,
774 void *data)
776 return 0;
779 static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
780 unsigned int index, unsigned int start,
781 unsigned int count, uint32_t flags, void *data)
783 return 0;
786 static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
787 unsigned int index, unsigned int start, unsigned int count,
788 uint32_t flags, void *data)
790 return 0;
793 static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
794 unsigned int index, unsigned int start, unsigned int count,
795 uint32_t flags, void *data)
797 struct eventfd_ctx *trigger;
799 if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
800 int fd = *(int *)data;
802 trigger = eventfd_ctx_fdget(fd);
803 if (IS_ERR(trigger)) {
804 gvt_err("eventfd_ctx_fdget failed\n");
805 return PTR_ERR(trigger);
807 vgpu->vdev.msi_trigger = trigger;
810 return 0;
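/*
 * VFIO_DEVICE_SET_IRQS dispatch: pick a handler from the IRQ index and the
 * requested action.  INTx mask/unmask/trigger are accepted as no-ops; an
 * MSI trigger request installs the eventfd used later by kvmgt_inject_msi().
 */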
813 static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
814 unsigned int index, unsigned int start, unsigned int count,
815 void *data)
817 int (*func)(struct intel_vgpu *vgpu, unsigned int index,
818 unsigned int start, unsigned int count, uint32_t flags,
819 void *data) = NULL;
821 switch (index) {
822 case VFIO_PCI_INTX_IRQ_INDEX:
823 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
824 case VFIO_IRQ_SET_ACTION_MASK:
825 func = intel_vgpu_set_intx_mask;
826 break;
827 case VFIO_IRQ_SET_ACTION_UNMASK:
828 func = intel_vgpu_set_intx_unmask;
829 break;
830 case VFIO_IRQ_SET_ACTION_TRIGGER:
831 func = intel_vgpu_set_intx_trigger;
832 break;
834 break;
835 case VFIO_PCI_MSI_IRQ_INDEX:
836 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
837 case VFIO_IRQ_SET_ACTION_MASK:
838 case VFIO_IRQ_SET_ACTION_UNMASK:
839 /* XXX Need masking support exported */
840 break;
841 case VFIO_IRQ_SET_ACTION_TRIGGER:
842 func = intel_vgpu_set_msi_trigger;
843 break;
845 break;
848 if (!func)
849 return -ENOTTY;
851 return func(vgpu, index, start, count, flags, data);
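/*
 * Device ioctls implemented here: VFIO_DEVICE_GET_INFO,
 * VFIO_DEVICE_GET_REGION_INFO (including a sparse-mmap capability for the
 * BAR2 aperture region), VFIO_DEVICE_GET_IRQ_INFO, VFIO_DEVICE_SET_IRQS and
 * VFIO_DEVICE_RESET.
 */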
854 static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
855 unsigned long arg)
857 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
858 unsigned long minsz;
860 gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
862 if (cmd == VFIO_DEVICE_GET_INFO) {
863 struct vfio_device_info info;
865 minsz = offsetofend(struct vfio_device_info, num_irqs);
867 if (copy_from_user(&info, (void __user *)arg, minsz))
868 return -EFAULT;
870 if (info.argsz < minsz)
871 return -EINVAL;
873 info.flags = VFIO_DEVICE_FLAGS_PCI;
874 info.flags |= VFIO_DEVICE_FLAGS_RESET;
875 info.num_regions = VFIO_PCI_NUM_REGIONS;
876 info.num_irqs = VFIO_PCI_NUM_IRQS;
878 return copy_to_user((void __user *)arg, &info, minsz) ?
879 -EFAULT : 0;
881 } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
882 struct vfio_region_info info;
883 struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
884 int i, ret;
885 struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
886 size_t size;
887 int nr_areas = 1;
888 int cap_type_id;
890 minsz = offsetofend(struct vfio_region_info, offset);
892 if (copy_from_user(&info, (void __user *)arg, minsz))
893 return -EFAULT;
895 if (info.argsz < minsz)
896 return -EINVAL;
898 switch (info.index) {
899 case VFIO_PCI_CONFIG_REGION_INDEX:
900 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
901 info.size = INTEL_GVT_MAX_CFG_SPACE_SZ;
902 info.flags = VFIO_REGION_INFO_FLAG_READ |
903 VFIO_REGION_INFO_FLAG_WRITE;
904 break;
905 case VFIO_PCI_BAR0_REGION_INDEX:
906 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
907 info.size = vgpu->cfg_space.bar[info.index].size;
908 if (!info.size) {
909 info.flags = 0;
910 break;
913 info.flags = VFIO_REGION_INFO_FLAG_READ |
914 VFIO_REGION_INFO_FLAG_WRITE;
915 break;
916 case VFIO_PCI_BAR1_REGION_INDEX:
917 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
918 info.size = 0;
919 info.flags = 0;
920 break;
921 case VFIO_PCI_BAR2_REGION_INDEX:
922 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
923 info.flags = VFIO_REGION_INFO_FLAG_CAPS |
924 VFIO_REGION_INFO_FLAG_MMAP |
925 VFIO_REGION_INFO_FLAG_READ |
926 VFIO_REGION_INFO_FLAG_WRITE;
927 info.size = gvt_aperture_sz(vgpu->gvt);
929 size = sizeof(*sparse) +
930 (nr_areas * sizeof(*sparse->areas));
931 sparse = kzalloc(size, GFP_KERNEL);
932 if (!sparse)
933 return -ENOMEM;
935 sparse->nr_areas = nr_areas;
936 cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
937 sparse->areas[0].offset =
938 PAGE_ALIGN(vgpu_aperture_offset(vgpu));
939 sparse->areas[0].size = vgpu_aperture_sz(vgpu);
940 if (!caps.buf) {
941 kfree(caps.buf);
942 caps.buf = NULL;
943 caps.size = 0;
945 break;
947 case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
948 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
949 info.size = 0;
951 info.flags = 0;
952 gvt_dbg_core("get region info bar:%d\n", info.index);
953 break;
955 case VFIO_PCI_ROM_REGION_INDEX:
956 case VFIO_PCI_VGA_REGION_INDEX:
957 gvt_dbg_core("get region info index:%d\n", info.index);
958 break;
959 default:
961 struct vfio_region_info_cap_type cap_type;
963 if (info.index >= VFIO_PCI_NUM_REGIONS +
964 vgpu->vdev.num_regions)
965 return -EINVAL;
967 i = info.index - VFIO_PCI_NUM_REGIONS;
969 info.offset =
970 VFIO_PCI_INDEX_TO_OFFSET(info.index);
971 info.size = vgpu->vdev.region[i].size;
972 info.flags = vgpu->vdev.region[i].flags;
974 cap_type.type = vgpu->vdev.region[i].type;
975 cap_type.subtype = vgpu->vdev.region[i].subtype;
977 ret = vfio_info_add_capability(&caps,
978 VFIO_REGION_INFO_CAP_TYPE,
979 &cap_type);
980 if (ret)
981 return ret;
985 if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
986 switch (cap_type_id) {
987 case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
988 ret = vfio_info_add_capability(&caps,
989 VFIO_REGION_INFO_CAP_SPARSE_MMAP,
990 sparse);
991 kfree(sparse);
992 if (ret)
993 return ret;
994 break;
995 default:
996 return -EINVAL;
1000 if (caps.size) {
1001 if (info.argsz < sizeof(info) + caps.size) {
1002 info.argsz = sizeof(info) + caps.size;
1003 info.cap_offset = 0;
1004 } else {
1005 vfio_info_cap_shift(&caps, sizeof(info));
1006 if (copy_to_user((void __user *)arg +
1007 sizeof(info), caps.buf,
1008 caps.size)) {
1009 kfree(caps.buf);
1010 return -EFAULT;
1012 info.cap_offset = sizeof(info);
1015 kfree(caps.buf);
1018 return copy_to_user((void __user *)arg, &info, minsz) ?
1019 -EFAULT : 0;
1020 } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
1021 struct vfio_irq_info info;
1023 minsz = offsetofend(struct vfio_irq_info, count);
1025 if (copy_from_user(&info, (void __user *)arg, minsz))
1026 return -EFAULT;
1028 if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
1029 return -EINVAL;
1031 switch (info.index) {
1032 case VFIO_PCI_INTX_IRQ_INDEX:
1033 case VFIO_PCI_MSI_IRQ_INDEX:
1034 break;
1035 default:
1036 return -EINVAL;
1039 info.flags = VFIO_IRQ_INFO_EVENTFD;
1041 info.count = intel_vgpu_get_irq_count(vgpu, info.index);
1043 if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
1044 info.flags |= (VFIO_IRQ_INFO_MASKABLE |
1045 VFIO_IRQ_INFO_AUTOMASKED);
1046 else
1047 info.flags |= VFIO_IRQ_INFO_NORESIZE;
1049 return copy_to_user((void __user *)arg, &info, minsz) ?
1050 -EFAULT : 0;
1051 } else if (cmd == VFIO_DEVICE_SET_IRQS) {
1052 struct vfio_irq_set hdr;
1053 u8 *data = NULL;
1054 int ret = 0;
1055 size_t data_size = 0;
1057 minsz = offsetofend(struct vfio_irq_set, count);
1059 if (copy_from_user(&hdr, (void __user *)arg, minsz))
1060 return -EFAULT;
1062 if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
1063 int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
1065 ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
1066 VFIO_PCI_NUM_IRQS, &data_size);
1067 if (ret) {
1068 gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
1069 return -EINVAL;
1071 if (data_size) {
1072 data = memdup_user((void __user *)(arg + minsz),
1073 data_size);
1074 if (IS_ERR(data))
1075 return PTR_ERR(data);
1079 ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
1080 hdr.start, hdr.count, data);
1081 kfree(data);
1083 return ret;
1084 } else if (cmd == VFIO_DEVICE_RESET) {
1085 intel_gvt_ops->vgpu_reset(vgpu);
1086 return 0;
1089 return 0;
1092 static const struct mdev_parent_ops intel_vgpu_ops = {
1093 .supported_type_groups = intel_vgpu_type_groups,
1094 .create = intel_vgpu_create,
1095 .remove = intel_vgpu_remove,
1097 .open = intel_vgpu_open,
1098 .release = intel_vgpu_release,
1100 .read = intel_vgpu_read,
1101 .write = intel_vgpu_write,
1102 .mmap = intel_vgpu_mmap,
1103 .ioctl = intel_vgpu_ioctl,
1106 static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
1108 if (!intel_gvt_init_vgpu_type_groups(gvt))
1109 return -EFAULT;
1111 intel_gvt_ops = ops;
1113 return mdev_register_device(dev, &intel_vgpu_ops);
1116 static void kvmgt_host_exit(struct device *dev, void *gvt)
1118 intel_gvt_cleanup_vgpu_type_groups(gvt);
1119 mdev_unregister_device(dev);
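/*
 * set_wp_page/unset_wp_page hooks: under kvm->srcu and mmu_lock, register
 * (or unregister) the gfn with KVM's page-tracking machinery and mirror the
 * state in the local protect table.
 */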
1122 static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
1124 struct kvmgt_guest_info *info;
1125 struct kvm *kvm;
1126 struct kvm_memory_slot *slot;
1127 int idx;
1129 if (!handle_valid(handle))
1130 return -ESRCH;
1132 info = (struct kvmgt_guest_info *)handle;
1133 kvm = info->kvm;
1135 idx = srcu_read_lock(&kvm->srcu);
1136 slot = gfn_to_memslot(kvm, gfn);
1138 spin_lock(&kvm->mmu_lock);
1140 if (kvmgt_gfn_is_write_protected(info, gfn))
1141 goto out;
1143 kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1144 kvmgt_protect_table_add(info, gfn);
1146 out:
1147 spin_unlock(&kvm->mmu_lock);
1148 srcu_read_unlock(&kvm->srcu, idx);
1149 return 0;
1152 static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
1154 struct kvmgt_guest_info *info;
1155 struct kvm *kvm;
1156 struct kvm_memory_slot *slot;
1157 int idx;
1159 if (!handle_valid(handle))
1160 return 0;
1162 info = (struct kvmgt_guest_info *)handle;
1163 kvm = info->kvm;
1165 idx = srcu_read_lock(&kvm->srcu);
1166 slot = gfn_to_memslot(kvm, gfn);
1168 spin_lock(&kvm->mmu_lock);
1170 if (!kvmgt_gfn_is_write_protected(info, gfn))
1171 goto out;
1173 kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1174 kvmgt_protect_table_del(info, gfn);
1176 out:
1177 spin_unlock(&kvm->mmu_lock);
1178 srcu_read_unlock(&kvm->srcu, idx);
1179 return 0;
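/*
 * KVM page-track callbacks: writes that hit a write-protected gfn are
 * forwarded to emulate_mmio_write(), and tracking is dropped for every
 * protected gfn in a memslot that is being flushed.
 */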
1182 static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1183 const u8 *val, int len,
1184 struct kvm_page_track_notifier_node *node)
1186 struct kvmgt_guest_info *info = container_of(node,
1187 struct kvmgt_guest_info, track_node);
1189 if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
1190 intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
1191 (void *)val, len);
1194 static void kvmgt_page_track_flush_slot(struct kvm *kvm,
1195 struct kvm_memory_slot *slot,
1196 struct kvm_page_track_notifier_node *node)
1198 int i;
1199 gfn_t gfn;
1200 struct kvmgt_guest_info *info = container_of(node,
1201 struct kvmgt_guest_info, track_node);
1203 spin_lock(&kvm->mmu_lock);
1204 for (i = 0; i < slot->npages; i++) {
1205 gfn = slot->base_gfn + i;
1206 if (kvmgt_gfn_is_write_protected(info, gfn)) {
1207 kvm_slot_page_track_remove_page(kvm, slot, gfn,
1208 KVM_PAGE_TRACK_WRITE);
1209 kvmgt_protect_table_del(info, gfn);
1212 spin_unlock(&kvm->mmu_lock);
1215 static bool kvmgt_check_guest(void)
1217 unsigned int eax, ebx, ecx, edx;
1218 char s[12];
1219 unsigned int *i;
1221 eax = KVM_CPUID_SIGNATURE;
1222 ebx = ecx = edx = 0;
1224 asm volatile ("cpuid"
1225 : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
1226 :
1227 : "cc", "memory");
1228 i = (unsigned int *)s;
1229 i[0] = ebx;
1230 i[1] = ecx;
1231 i[2] = edx;
1233 return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM"));
1237 * NOTE:
1238 * It's actually impossible to check if we are running in KVM host,
1239 * since the "KVM host" is simply native. So we only detect guest here.
1241 static int kvmgt_detect_host(void)
1243 #ifdef CONFIG_INTEL_IOMMU
1244 if (intel_iommu_gfx_mapped) {
1245 gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
1246 return -ENODEV;
1248 #endif
1249 return kvmgt_check_guest() ? -ENODEV : 0;
1252 static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
1254 struct intel_vgpu *itr;
1255 struct kvmgt_guest_info *info;
1256 int id;
1257 bool ret = false;
1259 mutex_lock(&vgpu->gvt->lock);
1260 for_each_active_vgpu(vgpu->gvt, itr, id) {
1261 if (!handle_valid(itr->handle))
1262 continue;
1264 info = (struct kvmgt_guest_info *)itr->handle;
1265 if (kvm && kvm == info->kvm) {
1266 ret = true;
1267 goto out;
1270 out:
1271 mutex_unlock(&vgpu->gvt->lock);
1272 return ret;
1275 static int kvmgt_guest_init(struct mdev_device *mdev)
1277 struct kvmgt_guest_info *info;
1278 struct intel_vgpu *vgpu;
1279 struct kvm *kvm;
1281 vgpu = mdev_get_drvdata(mdev);
1282 if (handle_valid(vgpu->handle))
1283 return -EEXIST;
1285 kvm = vgpu->vdev.kvm;
1286 if (!kvm || kvm->mm != current->mm) {
1287 gvt_err("KVM is required to use Intel vGPU\n");
1288 return -ESRCH;
1291 if (__kvmgt_vgpu_exist(vgpu, kvm))
1292 return -EEXIST;
1294 info = vzalloc(sizeof(struct kvmgt_guest_info));
1295 if (!info)
1296 return -ENOMEM;
1298 vgpu->handle = (unsigned long)info;
1299 info->vgpu = vgpu;
1300 info->kvm = kvm;
1302 kvmgt_protect_table_init(info);
1303 gvt_cache_init(vgpu);
1305 info->track_node.track_write = kvmgt_page_track_write;
1306 info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
1307 kvm_page_track_register_notifier(kvm, &info->track_node);
1309 return 0;
1312 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
1314 struct intel_vgpu *vgpu;
1316 if (!info) {
1317 gvt_err("kvmgt_guest_info invalid\n");
1318 return false;
1321 vgpu = info->vgpu;
1323 kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
1324 kvmgt_protect_table_destroy(info);
1325 gvt_cache_destroy(vgpu);
1326 vfree(info);
1328 return true;
1331 static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
1333 /* nothing to do here */
1334 return 0;
1337 static void kvmgt_detach_vgpu(unsigned long handle)
1339 /* nothing to do here */
1342 static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
1344 struct kvmgt_guest_info *info;
1345 struct intel_vgpu *vgpu;
1347 if (!handle_valid(handle))
1348 return -ESRCH;
1350 info = (struct kvmgt_guest_info *)handle;
1351 vgpu = info->vgpu;
1353 if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
1354 return 0;
1356 return -EFAULT;
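/*
 * Translate a guest frame number to a host pfn: try the per-vGPU cache
 * first, otherwise pin the page through vfio_pin_pages() and remember the
 * result for later lookups.
 */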
1359 static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1361 unsigned long pfn;
1362 struct kvmgt_guest_info *info;
1363 struct device *dev;
1364 int rc;
1366 if (!handle_valid(handle))
1367 return INTEL_GVT_INVALID_ADDR;
1369 info = (struct kvmgt_guest_info *)handle;
1370 pfn = gvt_cache_find(info->vgpu, gfn);
1371 if (pfn != 0)
1372 return pfn;
1374 pfn = INTEL_GVT_INVALID_ADDR;
1375 dev = mdev_dev(info->vgpu->vdev.mdev);
1376 rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
1377 if (rc != 1) {
1378 gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
1379 return INTEL_GVT_INVALID_ADDR;
1382 gvt_cache_add(info->vgpu, gfn, pfn);
1383 return pfn;
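/*
 * Guest physical memory access via kvm_read_guest()/kvm_write_guest().
 * When called from a kthread (current->mm == NULL), temporarily adopt
 * kvm->mm with use_mm() so the copy can resolve the VM's user addresses.
 */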
1386 static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
1387 void *buf, unsigned long len, bool write)
1389 struct kvmgt_guest_info *info;
1390 struct kvm *kvm;
1391 int ret;
1392 bool kthread = current->mm == NULL;
1394 if (!handle_valid(handle))
1395 return -ESRCH;
1397 info = (struct kvmgt_guest_info *)handle;
1398 kvm = info->kvm;
1400 if (kthread)
1401 use_mm(kvm->mm);
1403 ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
1404 kvm_read_guest(kvm, gpa, buf, len);
1406 if (kthread)
1407 unuse_mm(kvm->mm);
1409 return ret;
1412 static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
1413 void *buf, unsigned long len)
1415 return kvmgt_rw_gpa(handle, gpa, buf, len, false);
1418 static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
1419 void *buf, unsigned long len)
1421 return kvmgt_rw_gpa(handle, gpa, buf, len, true);
1424 static unsigned long kvmgt_virt_to_pfn(void *addr)
1426 return PFN_DOWN(__pa(addr));
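/*
 * The MPT (mediated pass-through) operations exported to GVT core: this
 * table backs the hypervisor-abstraction hooks with the KVM/VFIO-based
 * implementations above.
 */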
1429 struct intel_gvt_mpt kvmgt_mpt = {
1430 .detect_host = kvmgt_detect_host,
1431 .host_init = kvmgt_host_init,
1432 .host_exit = kvmgt_host_exit,
1433 .attach_vgpu = kvmgt_attach_vgpu,
1434 .detach_vgpu = kvmgt_detach_vgpu,
1435 .inject_msi = kvmgt_inject_msi,
1436 .from_virt_to_mfn = kvmgt_virt_to_pfn,
1437 .set_wp_page = kvmgt_write_protect_add,
1438 .unset_wp_page = kvmgt_write_protect_remove,
1439 .read_gpa = kvmgt_read_gpa,
1440 .write_gpa = kvmgt_write_gpa,
1441 .gfn_to_mfn = kvmgt_gfn_to_pfn,
1443 EXPORT_SYMBOL_GPL(kvmgt_mpt);
1445 static int __init kvmgt_init(void)
1447 return 0;
1450 static void __exit kvmgt_exit(void)
1454 module_init(kvmgt_init);
1455 module_exit(kvmgt_exit);
1457 MODULE_LICENSE("GPL and additional rights");
1458 MODULE_AUTHOR("Intel Corporation");