/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 */

#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>

#include "i915_drv.h"
#include "gvt.h"

static const struct intel_gvt_ops *intel_gvt_ops;

/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT   40
#define VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
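
/*
 * A vfio-pci style file offset encodes the region index in its upper bits:
 * for example VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_CONFIG_REGION_INDEX) is
 * 7ULL << 40 (0x70000000000).  intel_vgpu_rw() recovers the region with
 * VFIO_PCI_OFFSET_TO_INDEX(*ppos) and the offset inside that region with
 * *ppos & VFIO_PCI_OFFSET_MASK.
 */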

struct vfio_region {
        u32 type;
        u32 subtype;
        size_t size;
        u32 flags;
};

struct kvmgt_pgfn {
        gfn_t gfn;
        struct hlist_node hnode;
};

struct kvmgt_guest_info {
        struct kvm *kvm;
        struct intel_vgpu *vgpu;
        struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
        struct hlist_head ptable[NR_BKT];
};

struct gvt_dma {
        struct rb_node node;
        gfn_t gfn;
        kvm_pfn_t pfn;
};
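
/*
 * vgpu->handle stores the kernel address of the kvmgt_guest_info set up in
 * kvmgt_guest_init(); small values (below 0x100) are never valid pointers
 * here and are treated as "no guest attached" by handle_valid() below.
 */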

static inline bool handle_valid(unsigned long handle)
{
        return !!(handle & ~0xff);
}

static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);

static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
        struct rb_node *node = vgpu->vdev.cache.rb_node;
        struct gvt_dma *ret = NULL;

        while (node) {
                struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);

                if (gfn < itr->gfn)
                        node = node->rb_left;
                else if (gfn > itr->gfn)
                        node = node->rb_right;
                else {
                        ret = itr;
                        goto out;
                }
        }

out:
        return ret;
}

static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
        struct gvt_dma *entry;

        mutex_lock(&vgpu->vdev.cache_lock);
        entry = __gvt_cache_find(vgpu, gfn);
        mutex_unlock(&vgpu->vdev.cache_lock);

        return entry == NULL ? 0 : entry->pfn;
}

static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
{
        struct gvt_dma *new, *itr;
        struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;

        new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
        if (!new)
                return;

        new->gfn = gfn;
        new->pfn = pfn;

        mutex_lock(&vgpu->vdev.cache_lock);
        while (*link) {
                parent = *link;
                itr = rb_entry(parent, struct gvt_dma, node);

                if (gfn == itr->gfn)
                        goto out;
                else if (gfn < itr->gfn)
                        link = &parent->rb_left;
                else
                        link = &parent->rb_right;
        }

        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, &vgpu->vdev.cache);
        mutex_unlock(&vgpu->vdev.cache_lock);
        return;

out:
        mutex_unlock(&vgpu->vdev.cache_lock);
        kfree(new);
}

static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
                                struct gvt_dma *entry)
{
        rb_erase(&entry->node, &vgpu->vdev.cache);
        kfree(entry);
}

static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
{
        struct device *dev = mdev_dev(vgpu->vdev.mdev);
        struct gvt_dma *this;
        unsigned long g1;
        int rc;

        mutex_lock(&vgpu->vdev.cache_lock);
        this = __gvt_cache_find(vgpu, gfn);
        if (!this) {
                mutex_unlock(&vgpu->vdev.cache_lock);
                return;
        }

        g1 = gfn;
        rc = vfio_unpin_pages(dev, &g1, 1);
        WARN_ON(rc != 1);
        __gvt_cache_remove_entry(vgpu, this);
        mutex_unlock(&vgpu->vdev.cache_lock);
}

static void gvt_cache_init(struct intel_vgpu *vgpu)
{
        vgpu->vdev.cache = RB_ROOT;
        mutex_init(&vgpu->vdev.cache_lock);
}

static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
        struct gvt_dma *dma;
        struct rb_node *node = NULL;
        struct device *dev = mdev_dev(vgpu->vdev.mdev);
        unsigned long gfn;

        mutex_lock(&vgpu->vdev.cache_lock);
        while ((node = rb_first(&vgpu->vdev.cache))) {
                dma = rb_entry(node, struct gvt_dma, node);
                gfn = dma->gfn;

                vfio_unpin_pages(dev, &gfn, 1);
                __gvt_cache_remove_entry(vgpu, dma);
        }
        mutex_unlock(&vgpu->vdev.cache_lock);
}
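
/*
 * The functions above maintain a per-vGPU gfn -> host pfn cache: an rb-tree
 * keyed by guest frame number and protected by vdev.cache_lock.  Entries are
 * added by kvmgt_gfn_to_pfn() after vfio_pin_pages() succeeds, dropped one by
 * one when the IOMMU notifier reports a DMA unmap, and torn down wholesale in
 * gvt_cache_destroy() when the guest goes away.
 */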

static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
                const char *name)
{
        int i;
        struct intel_vgpu_type *t;
        const char *driver_name = dev_driver_string(
                        &gvt->dev_priv->drm.pdev->dev);

        for (i = 0; i < gvt->num_types; i++) {
                t = &gvt->types[i];
                if (!strncmp(t->name, name + strlen(driver_name) + 1,
                        sizeof(t->name)))
                        return t;
        }

        return NULL;
}

static ssize_t available_instance_show(struct kobject *kobj,
                struct device *dev, char *buf)
{
        struct intel_vgpu_type *type;
        unsigned int num = 0;
        void *gvt = kdev_to_i915(dev)->gvt;

        type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
        if (!type)
                num = 0;
        else
                num = type->avail_instance;

        return sprintf(buf, "%u\n", num);
}

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
                char *buf)
{
        return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}

static ssize_t description_show(struct kobject *kobj, struct device *dev,
                char *buf)
{
        struct intel_vgpu_type *type;
        void *gvt = kdev_to_i915(dev)->gvt;

        type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
        if (!type)
                return 0;

        return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
                       "fence: %d\n",
                       BYTES_TO_MB(type->low_gm_size),
                       BYTES_TO_MB(type->high_gm_size),
                       type->fence);
}

static MDEV_TYPE_ATTR_RO(available_instance);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);

static struct attribute *type_attrs[] = {
        &mdev_type_attr_available_instance.attr,
        &mdev_type_attr_device_api.attr,
        &mdev_type_attr_description.attr,
        NULL,
};

static struct attribute_group *intel_vgpu_type_groups[] = {
        [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};

static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
        int i, j;
        struct intel_vgpu_type *type;
        struct attribute_group *group;

        for (i = 0; i < gvt->num_types; i++) {
                type = &gvt->types[i];

                group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
                if (WARN_ON(!group))
                        goto unwind;

                group->name = type->name;
                group->attrs = type_attrs;
                intel_vgpu_type_groups[i] = group;
        }

        return true;

unwind:
        for (j = 0; j < i; j++) {
                group = intel_vgpu_type_groups[j];
                kfree(group);
        }

        return false;
}

static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
{
        int i;
        struct attribute_group *group;

        for (i = 0; i < gvt->num_types; i++) {
                group = intel_vgpu_type_groups[i];
                kfree(group);
        }
}

static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
        hash_init(info->ptable);
}

static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
{
        struct kvmgt_pgfn *p;
        struct hlist_node *tmp;
        int i;

        hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
                hash_del(&p->hnode);
                kfree(p);
        }
}

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
{
        struct kvmgt_pgfn *p, *res = NULL;

        hash_for_each_possible(info->ptable, p, hnode, gfn) {
                if (gfn == p->gfn) {
                        res = p;
                        break;
                }
        }

        return res;
}

static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
                                gfn_t gfn)
{
        struct kvmgt_pgfn *p;

        p = __kvmgt_protect_table_find(info, gfn);
        return !!p;
}

static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
{
        struct kvmgt_pgfn *p;

        if (kvmgt_gfn_is_write_protected(info, gfn))
                return;

        p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
        if (WARN(!p, "gfn: 0x%llx\n", gfn))
                return;

        p->gfn = gfn;
        hash_add(info->ptable, &p->hnode, gfn);
}

static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
                                gfn_t gfn)
{
        struct kvmgt_pgfn *p;

        p = __kvmgt_protect_table_find(info, gfn);
        if (p) {
                hash_del(&p->hnode);
                kfree(p);
        }
}
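
/*
 * The kvmgt_protect_table_* helpers mirror which guest frames have been
 * write-protected through the KVM page-track interface, so that
 * kvmgt_page_track_write() only forwards writes for frames GVT-g actually
 * cares about and kvmgt_write_protect_add()/remove() stay idempotent.
 */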

static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
        struct intel_vgpu *vgpu;
        struct intel_vgpu_type *type;
        struct device *pdev;
        void *gvt;
        int ret;

        pdev = mdev_parent_dev(mdev);
        gvt = kdev_to_i915(pdev)->gvt;

        type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
        if (!type) {
                gvt_err("failed to find type %s to create\n",
                                                kobject_name(kobj));
                ret = -EINVAL;
                goto out;
        }

        vgpu = intel_gvt_ops->vgpu_create(gvt, type);
        if (IS_ERR_OR_NULL(vgpu)) {
                gvt_err("create intel vgpu failed\n");
                ret = -EINVAL;
                goto out;
        }

        INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);

        vgpu->vdev.mdev = mdev;
        mdev_set_drvdata(mdev, vgpu);

        gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
                     dev_name(mdev_dev(mdev)));
        ret = 0;

out:
        return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)
{
        struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

        if (handle_valid(vgpu->handle))
                return -EBUSY;

        intel_gvt_ops->vgpu_destroy(vgpu);
        return 0;
}

static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
                                     unsigned long action, void *data)
{
        struct intel_vgpu *vgpu = container_of(nb,
                                        struct intel_vgpu,
                                        vdev.iommu_notifier);

        if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
                struct vfio_iommu_type1_dma_unmap *unmap = data;
                unsigned long gfn, end_gfn;

                gfn = unmap->iova >> PAGE_SHIFT;
                end_gfn = gfn + unmap->size / PAGE_SIZE;

                while (gfn < end_gfn)
                        gvt_cache_remove(vgpu, gfn++);
        }

        return NOTIFY_OK;
}

static int intel_vgpu_group_notifier(struct notifier_block *nb,
                                     unsigned long action, void *data)
{
        struct intel_vgpu *vgpu = container_of(nb,
                                        struct intel_vgpu,
                                        vdev.group_notifier);

        /* the only action we care about */
        if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
                vgpu->vdev.kvm = data;

                if (!data)
                        schedule_work(&vgpu->vdev.release_work);
        }

        return NOTIFY_OK;
}

static int intel_vgpu_open(struct mdev_device *mdev)
{
        struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
        unsigned long events;
        int ret;

        vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
        vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;

        events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
        ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
                                &vgpu->vdev.iommu_notifier);
        if (ret != 0) {
                gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
                goto out;
        }

        events = VFIO_GROUP_NOTIFY_SET_KVM;
        ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
                                &vgpu->vdev.group_notifier);
        if (ret != 0) {
                gvt_err("vfio_register_notifier for group failed: %d\n", ret);
                goto undo_iommu;
        }

        return kvmgt_guest_init(mdev);

undo_iommu:
        vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
                                        &vgpu->vdev.iommu_notifier);
out:
        return ret;
}

static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
        struct kvmgt_guest_info *info;

        if (!handle_valid(vgpu->handle))
                return;

        vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
                                        &vgpu->vdev.iommu_notifier);
        vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
                                        &vgpu->vdev.group_notifier);

        info = (struct kvmgt_guest_info *)vgpu->handle;
        kvmgt_guest_exit(info);

        vgpu->handle = 0;
}

static void intel_vgpu_release(struct mdev_device *mdev)
{
        struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

        __intel_vgpu_release(vgpu);
}

static void intel_vgpu_release_work(struct work_struct *work)
{
        struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
                                        vdev.release_work);

        __intel_vgpu_release(vgpu);
}

static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
{
        u32 start_lo, start_hi;
        u32 mem_type;
        int pos = PCI_BASE_ADDRESS_0;

        start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
                        PCI_BASE_ADDRESS_MEM_MASK;
        mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
                        PCI_BASE_ADDRESS_MEM_TYPE_MASK;

        switch (mem_type) {
        case PCI_BASE_ADDRESS_MEM_TYPE_64:
                start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
                                                + pos + 4));
                break;
        case PCI_BASE_ADDRESS_MEM_TYPE_32:
        case PCI_BASE_ADDRESS_MEM_TYPE_1M:
                /* 1M mem BAR treated as 32-bit BAR */
        default:
                /* mem unknown type treated as 32-bit BAR */
                start_hi = 0;
                break;
        }

        return ((u64)start_hi << 32) | start_lo;
}
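
/*
 * Note: the virtual BAR0 base is read back from the vGPU's emulated config
 * space rather than from real hardware, so an access arriving through the
 * BAR regions can be turned into the guest-visible MMIO address
 * (bar0_start + pos) before it is handed to the GVT-g emulation hooks.
 */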

static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
                        size_t count, loff_t *ppos, bool is_write)
{
        struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
        unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
        uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
        int ret = -EINVAL;

        if (index >= VFIO_PCI_NUM_REGIONS) {
                gvt_err("invalid index: %u\n", index);
                return -EINVAL;
        }

        switch (index) {
        case VFIO_PCI_CONFIG_REGION_INDEX:
                if (is_write)
                        ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
                                                buf, count);
                else
                        ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
                                                buf, count);
                break;
        case VFIO_PCI_BAR0_REGION_INDEX:
        case VFIO_PCI_BAR1_REGION_INDEX:
                if (is_write) {
                        uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);

                        ret = intel_gvt_ops->emulate_mmio_write(vgpu,
                                                bar0_start + pos, buf, count);
                } else {
                        uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);

                        ret = intel_gvt_ops->emulate_mmio_read(vgpu,
                                                bar0_start + pos, buf, count);
                }
                break;
        case VFIO_PCI_BAR2_REGION_INDEX:
        case VFIO_PCI_BAR3_REGION_INDEX:
        case VFIO_PCI_BAR4_REGION_INDEX:
        case VFIO_PCI_BAR5_REGION_INDEX:
        case VFIO_PCI_VGA_REGION_INDEX:
        case VFIO_PCI_ROM_REGION_INDEX:
        default:
                gvt_err("unsupported region: %u\n", index);
        }

        return ret == 0 ? count : ret;
}

static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
                        size_t count, loff_t *ppos)
{
        unsigned int done = 0;
        int ret;

        while (count) {
                size_t filled;

                if (count >= 4 && !(*ppos % 4)) {
                        u32 val;

                        ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
                                        ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 4;
                } else if (count >= 2 && !(*ppos % 2)) {
                        u16 val;

                        ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
                                        ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 2;
                } else {
                        u8 val;

                        ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
                                        false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 1;
                }

                count -= filled;
                done += filled;
                *ppos += filled;
                buf += filled;
        }

        return done;

read_err:
        return -EFAULT;
}

static ssize_t intel_vgpu_write(struct mdev_device *mdev,
                                const char __user *buf,
                                size_t count, loff_t *ppos)
{
        unsigned int done = 0;
        int ret;

        while (count) {
                size_t filled;

                if (count >= 4 && !(*ppos % 4)) {
                        u32 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
                                        ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 4;
                } else if (count >= 2 && !(*ppos % 2)) {
                        u16 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = intel_vgpu_rw(mdev, (char *)&val,
                                        sizeof(val), ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 2;
                } else {
                        u8 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = intel_vgpu_rw(mdev, &val, sizeof(val),
                                        ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 1;
                }

                count -= filled;
                done += filled;
                *ppos += filled;
                buf += filled;
        }

        return done;

write_err:
        return -EFAULT;
}

static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
        unsigned int index;
        u64 virtaddr;
        unsigned long req_size, pgoff = 0;
        pgprot_t pg_prot;
        struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

        index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
        if (index >= VFIO_PCI_ROM_REGION_INDEX)
                return -EINVAL;

        if (vma->vm_end < vma->vm_start)
                return -EINVAL;
        if ((vma->vm_flags & VM_SHARED) == 0)
                return -EINVAL;
        if (index != VFIO_PCI_BAR2_REGION_INDEX)
                return -EINVAL;

        pg_prot = vma->vm_page_prot;
        virtaddr = vma->vm_start;
        req_size = vma->vm_end - vma->vm_start;
        pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;

        return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}
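
/*
 * Only the BAR2 (aperture) region is mmap-able; it is backed directly by the
 * vGPU's slice of the physical graphics aperture, which is why the whole
 * request can be satisfied with a single remap_pfn_range() of
 * vgpu_aperture_pa_base().
 */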

static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
{
        if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
                return 1;

        return 0;
}

static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
                        unsigned int index, unsigned int start,
                        unsigned int count, uint32_t flags,
                        void *data)
{
        return 0;
}

static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
                        unsigned int index, unsigned int start,
                        unsigned int count, uint32_t flags, void *data)
{
        return 0;
}

static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
                unsigned int index, unsigned int start, unsigned int count,
                uint32_t flags, void *data)
{
        return 0;
}

static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
                unsigned int index, unsigned int start, unsigned int count,
                uint32_t flags, void *data)
{
        struct eventfd_ctx *trigger;

        if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
                int fd = *(int *)data;

                trigger = eventfd_ctx_fdget(fd);
                if (IS_ERR(trigger)) {
                        gvt_err("eventfd_ctx_fdget failed\n");
                        return PTR_ERR(trigger);
                }
                vgpu->vdev.msi_trigger = trigger;
        }

        return 0;
}

static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
                unsigned int index, unsigned int start, unsigned int count,
                void *data)
{
        int (*func)(struct intel_vgpu *vgpu, unsigned int index,
                        unsigned int start, unsigned int count, uint32_t flags,
                        void *data) = NULL;

        switch (index) {
        case VFIO_PCI_INTX_IRQ_INDEX:
                switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
                case VFIO_IRQ_SET_ACTION_MASK:
                        func = intel_vgpu_set_intx_mask;
                        break;
                case VFIO_IRQ_SET_ACTION_UNMASK:
                        func = intel_vgpu_set_intx_unmask;
                        break;
                case VFIO_IRQ_SET_ACTION_TRIGGER:
                        func = intel_vgpu_set_intx_trigger;
                        break;
                }
                break;
        case VFIO_PCI_MSI_IRQ_INDEX:
                switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
                case VFIO_IRQ_SET_ACTION_MASK:
                case VFIO_IRQ_SET_ACTION_UNMASK:
                        /* XXX Need masking support exported */
                        break;
                case VFIO_IRQ_SET_ACTION_TRIGGER:
                        func = intel_vgpu_set_msi_trigger;
                        break;
                }
                break;
        }

        if (!func)
                return -ENOTTY;

        return func(vgpu, index, start, count, flags, data);
}

static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                             unsigned long arg)
{
        struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
        unsigned long minsz;

        gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);

        if (cmd == VFIO_DEVICE_GET_INFO) {
                struct vfio_device_info info;

                minsz = offsetofend(struct vfio_device_info, num_irqs);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                info.flags = VFIO_DEVICE_FLAGS_PCI;
                info.flags |= VFIO_DEVICE_FLAGS_RESET;
                info.num_regions = VFIO_PCI_NUM_REGIONS;
                info.num_irqs = VFIO_PCI_NUM_IRQS;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;

        } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
                struct vfio_region_info info;
                struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
                int i, ret;
                struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
                size_t size;
                int nr_areas = 1;
                int cap_type_id;

                minsz = offsetofend(struct vfio_region_info, offset);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                switch (info.index) {
                case VFIO_PCI_CONFIG_REGION_INDEX:
                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.size = INTEL_GVT_MAX_CFG_SPACE_SZ;
                        info.flags = VFIO_REGION_INFO_FLAG_READ |
                                     VFIO_REGION_INFO_FLAG_WRITE;
                        break;
                case VFIO_PCI_BAR0_REGION_INDEX:
                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.size = vgpu->cfg_space.bar[info.index].size;
                        if (!info.size) {
                                info.flags = 0;
                                break;
                        }

                        info.flags = VFIO_REGION_INFO_FLAG_READ |
                                     VFIO_REGION_INFO_FLAG_WRITE;
                        break;
                case VFIO_PCI_BAR1_REGION_INDEX:
                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.size = 0;
                        info.flags = 0;
                        break;
                case VFIO_PCI_BAR2_REGION_INDEX:
                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.flags = VFIO_REGION_INFO_FLAG_CAPS |
                                        VFIO_REGION_INFO_FLAG_MMAP |
                                        VFIO_REGION_INFO_FLAG_READ |
                                        VFIO_REGION_INFO_FLAG_WRITE;
                        info.size = gvt_aperture_sz(vgpu->gvt);

                        size = sizeof(*sparse) +
                                        (nr_areas * sizeof(*sparse->areas));
                        sparse = kzalloc(size, GFP_KERNEL);
                        if (!sparse)
                                return -ENOMEM;

                        sparse->nr_areas = nr_areas;
                        cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
                        sparse->areas[0].offset =
                                        PAGE_ALIGN(vgpu_aperture_offset(vgpu));
                        sparse->areas[0].size = vgpu_aperture_sz(vgpu);
                        break;

                case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.size = 0;
                        info.flags = 0;

                        gvt_dbg_core("get region info bar:%d\n", info.index);
                        break;

                case VFIO_PCI_ROM_REGION_INDEX:
                case VFIO_PCI_VGA_REGION_INDEX:
                        gvt_dbg_core("get region info index:%d\n", info.index);
                        break;
                default:
                        {
                                struct vfio_region_info_cap_type cap_type;

                                if (info.index >= VFIO_PCI_NUM_REGIONS +
                                                vgpu->vdev.num_regions)
                                        return -EINVAL;

                                i = info.index - VFIO_PCI_NUM_REGIONS;

                                info.offset =
                                        VFIO_PCI_INDEX_TO_OFFSET(info.index);
                                info.size = vgpu->vdev.region[i].size;
                                info.flags = vgpu->vdev.region[i].flags;

                                cap_type.type = vgpu->vdev.region[i].type;
                                cap_type.subtype = vgpu->vdev.region[i].subtype;

                                ret = vfio_info_add_capability(&caps,
                                                VFIO_REGION_INFO_CAP_TYPE,
                                                &cap_type);
                                if (ret)
                                        return ret;
                        }
                }

                if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
                        switch (cap_type_id) {
                        case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
                                ret = vfio_info_add_capability(&caps,
                                        VFIO_REGION_INFO_CAP_SPARSE_MMAP,
                                        sparse);
                                kfree(sparse);
                                if (ret)
                                        return ret;
                                break;
                        default:
                                return -EINVAL;
                        }
                }

                if (caps.size) {
                        if (info.argsz < sizeof(info) + caps.size) {
                                info.argsz = sizeof(info) + caps.size;
                                info.cap_offset = 0;
                        } else {
                                vfio_info_cap_shift(&caps, sizeof(info));
                                if (copy_to_user((void __user *)arg +
                                                  sizeof(info), caps.buf,
                                                  caps.size)) {
                                        kfree(caps.buf);
                                        return -EFAULT;
                                }
                                info.cap_offset = sizeof(info);
                        }

                        kfree(caps.buf);
                }

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;
        } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
                struct vfio_irq_info info;

                minsz = offsetofend(struct vfio_irq_info, count);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
                        return -EINVAL;

                switch (info.index) {
                case VFIO_PCI_INTX_IRQ_INDEX:
                case VFIO_PCI_MSI_IRQ_INDEX:
                        break;
                default:
                        return -EINVAL;
                }

                info.flags = VFIO_IRQ_INFO_EVENTFD;

                info.count = intel_vgpu_get_irq_count(vgpu, info.index);

                if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
                        info.flags |= (VFIO_IRQ_INFO_MASKABLE |
                                       VFIO_IRQ_INFO_AUTOMASKED);
                else
                        info.flags |= VFIO_IRQ_INFO_NORESIZE;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;
        } else if (cmd == VFIO_DEVICE_SET_IRQS) {
                struct vfio_irq_set hdr;
                u8 *data = NULL;
                int ret = 0;
                size_t data_size = 0;

                minsz = offsetofend(struct vfio_irq_set, count);

                if (copy_from_user(&hdr, (void __user *)arg, minsz))
                        return -EFAULT;

                if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
                        int max = intel_vgpu_get_irq_count(vgpu, hdr.index);

                        ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
                                                VFIO_PCI_NUM_IRQS, &data_size);
                        if (ret) {
                                gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
                                return -EINVAL;
                        }
                        if (data_size) {
                                data = memdup_user((void __user *)(arg + minsz),
                                                   data_size);
                                if (IS_ERR(data))
                                        return PTR_ERR(data);
                        }
                }

                ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
                                        hdr.start, hdr.count, data);
                kfree(data);

                return ret;
        } else if (cmd == VFIO_DEVICE_RESET) {
                intel_gvt_ops->vgpu_reset(vgpu);
                return 0;
        }

        return 0;
}

static const struct mdev_parent_ops intel_vgpu_ops = {
        .supported_type_groups  = intel_vgpu_type_groups,
        .create                 = intel_vgpu_create,
        .remove                 = intel_vgpu_remove,

        .open                   = intel_vgpu_open,
        .release                = intel_vgpu_release,

        .read                   = intel_vgpu_read,
        .write                  = intel_vgpu_write,
        .mmap                   = intel_vgpu_mmap,
        .ioctl                  = intel_vgpu_ioctl,
};
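
/*
 * Usage sketch (illustrative, not part of the original file): once
 * mdev_register_device() has run, userspace creates a vGPU by writing a UUID
 * to the sysfs "create" node of one of the advertised types, e.g.
 *
 *   echo "a297db4a-f4c2-11e6-90f6-d3b88d6c9525" > \
 *     /sys/bus/pci/devices/0000:00:02.0/mdev_supported_types/<type>/create
 *
 * which ends up in intel_vgpu_create() above; the resulting mdev device is
 * then opened through VFIO, triggering intel_vgpu_open().
 */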

static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
        if (!intel_gvt_init_vgpu_type_groups(gvt))
                return -EFAULT;

        intel_gvt_ops = ops;

        return mdev_register_device(dev, &intel_vgpu_ops);
}

static void kvmgt_host_exit(struct device *dev, void *gvt)
{
        intel_gvt_cleanup_vgpu_type_groups(gvt);
        mdev_unregister_device(dev);
}

static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
{
        struct kvmgt_guest_info *info;
        struct kvm *kvm;
        struct kvm_memory_slot *slot;
        int idx;

        if (!handle_valid(handle))
                return -ESRCH;

        info = (struct kvmgt_guest_info *)handle;
        kvm = info->kvm;

        idx = srcu_read_lock(&kvm->srcu);
        slot = gfn_to_memslot(kvm, gfn);

        spin_lock(&kvm->mmu_lock);

        if (kvmgt_gfn_is_write_protected(info, gfn))
                goto out;

        kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
        kvmgt_protect_table_add(info, gfn);

out:
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);
        return 0;
}

static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
{
        struct kvmgt_guest_info *info;
        struct kvm *kvm;
        struct kvm_memory_slot *slot;
        int idx;

        if (!handle_valid(handle))
                return 0;

        info = (struct kvmgt_guest_info *)handle;
        kvm = info->kvm;

        idx = srcu_read_lock(&kvm->srcu);
        slot = gfn_to_memslot(kvm, gfn);

        spin_lock(&kvm->mmu_lock);

        if (!kvmgt_gfn_is_write_protected(info, gfn))
                goto out;

        kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
        kvmgt_protect_table_del(info, gfn);

out:
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);
        return 0;
}

static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                const u8 *val, int len,
                struct kvm_page_track_notifier_node *node)
{
        struct kvmgt_guest_info *info = container_of(node,
                                        struct kvmgt_guest_info, track_node);

        if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
                intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
                                        (void *)val, len);
}

static void kvmgt_page_track_flush_slot(struct kvm *kvm,
                struct kvm_memory_slot *slot,
                struct kvm_page_track_notifier_node *node)
{
        int i;
        gfn_t gfn;
        struct kvmgt_guest_info *info = container_of(node,
                                        struct kvmgt_guest_info, track_node);

        spin_lock(&kvm->mmu_lock);
        for (i = 0; i < slot->npages; i++) {
                gfn = slot->base_gfn + i;
                if (kvmgt_gfn_is_write_protected(info, gfn)) {
                        kvm_slot_page_track_remove_page(kvm, slot, gfn,
                                                KVM_PAGE_TRACK_WRITE);
                        kvmgt_protect_table_del(info, gfn);
                }
        }
        spin_unlock(&kvm->mmu_lock);
}

static bool kvmgt_check_guest(void)
{
        unsigned int eax, ebx, ecx, edx;
        char s[12];
        unsigned int *i;

        eax = KVM_CPUID_SIGNATURE;
        ebx = ecx = edx = 0;

        asm volatile ("cpuid"
                      : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                      :
                      : "cc", "memory");
        i = (unsigned int *)s;
        i[0] = ebx;
        i[1] = ecx;
        i[2] = edx;

        return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM"));
}

/*
 * NOTE:
 * It's actually impossible to check if we are running in KVM host,
 * since the "KVM host" is simply native. So we only detect guest here.
 */
static int kvmgt_detect_host(void)
{
#ifdef CONFIG_INTEL_IOMMU
        if (intel_iommu_gfx_mapped) {
                gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
                return -ENODEV;
        }
#endif
        return kvmgt_check_guest() ? -ENODEV : 0;
}

static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
{
        struct intel_vgpu *itr;
        struct kvmgt_guest_info *info;
        int id;
        bool ret = false;

        mutex_lock(&vgpu->gvt->lock);
        for_each_active_vgpu(vgpu->gvt, itr, id) {
                if (!handle_valid(itr->handle))
                        continue;

                info = (struct kvmgt_guest_info *)itr->handle;
                if (kvm && kvm == info->kvm) {
                        ret = true;
                        goto out;
                }
        }
out:
        mutex_unlock(&vgpu->gvt->lock);
        return ret;
}

static int kvmgt_guest_init(struct mdev_device *mdev)
{
        struct kvmgt_guest_info *info;
        struct intel_vgpu *vgpu;
        struct kvm *kvm;

        vgpu = mdev_get_drvdata(mdev);
        if (handle_valid(vgpu->handle))
                return -EEXIST;

        kvm = vgpu->vdev.kvm;
        if (!kvm || kvm->mm != current->mm) {
                gvt_err("KVM is required to use Intel vGPU\n");
                return -ESRCH;
        }

        if (__kvmgt_vgpu_exist(vgpu, kvm))
                return -EEXIST;

        info = vzalloc(sizeof(struct kvmgt_guest_info));
        if (!info)
                return -ENOMEM;

        vgpu->handle = (unsigned long)info;
        info->vgpu = vgpu;
        info->kvm = kvm;

        kvmgt_protect_table_init(info);
        gvt_cache_init(vgpu);

        info->track_node.track_write = kvmgt_page_track_write;
        info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
        kvm_page_track_register_notifier(kvm, &info->track_node);

        return 0;
}

static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
        struct intel_vgpu *vgpu;

        if (!info) {
                gvt_err("kvmgt_guest_info invalid\n");
                return false;
        }

        vgpu = info->vgpu;

        kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
        kvmgt_protect_table_destroy(info);
        gvt_cache_destroy(vgpu);
        vfree(info);

        return true;
}

static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
{
        /* nothing to do here */
        return 0;
}

static void kvmgt_detach_vgpu(unsigned long handle)
{
        /* nothing to do here */
}

static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
        struct kvmgt_guest_info *info;
        struct intel_vgpu *vgpu;

        if (!handle_valid(handle))
                return -ESRCH;

        info = (struct kvmgt_guest_info *)handle;
        vgpu = info->vgpu;

        if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
                return 0;

        return -EFAULT;
}

static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
        unsigned long pfn;
        struct kvmgt_guest_info *info;
        struct device *dev;
        int rc;

        if (!handle_valid(handle))
                return INTEL_GVT_INVALID_ADDR;

        info = (struct kvmgt_guest_info *)handle;
        pfn = gvt_cache_find(info->vgpu, gfn);
        if (pfn != 0)
                return pfn;

        pfn = INTEL_GVT_INVALID_ADDR;
        dev = mdev_dev(info->vgpu->vdev.mdev);
        rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
        if (rc != 1) {
                gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
                return INTEL_GVT_INVALID_ADDR;
        }

        gvt_cache_add(info->vgpu, gfn, pfn);
        return pfn;
}
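
/*
 * On a cache miss the page is pinned through vfio_pin_pages(), which both
 * returns the host pfn and keeps the page resident for DMA; the result is
 * remembered in the gfn->pfn cache so repeated translations of the same
 * guest frame do not pin it again.
 */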

static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
                        void *buf, unsigned long len, bool write)
{
        struct kvmgt_guest_info *info;
        struct kvm *kvm;
        int ret;
        bool kthread = current->mm == NULL;

        if (!handle_valid(handle))
                return -ESRCH;

        info = (struct kvmgt_guest_info *)handle;
        kvm = info->kvm;

        if (kthread)
                use_mm(kvm->mm);

        ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
                      kvm_read_guest(kvm, gpa, buf, len);

        if (kthread)
                unuse_mm(kvm->mm);

        return ret;
}
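
/*
 * kvmgt_rw_gpa() can be reached from a GVT-g kernel thread that has no mm of
 * its own (current->mm == NULL); in that case the guest's mm is temporarily
 * adopted with use_mm()/unuse_mm() around the kvm_read_guest()/
 * kvm_write_guest() call so that the guest memory can be accessed.
 */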

static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
                        void *buf, unsigned long len)
{
        return kvmgt_rw_gpa(handle, gpa, buf, len, false);
}

static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
                        void *buf, unsigned long len)
{
        return kvmgt_rw_gpa(handle, gpa, buf, len, true);
}

static unsigned long kvmgt_virt_to_pfn(void *addr)
{
        return PFN_DOWN(__pa(addr));
}

struct intel_gvt_mpt kvmgt_mpt = {
        .detect_host = kvmgt_detect_host,
        .host_init = kvmgt_host_init,
        .host_exit = kvmgt_host_exit,
        .attach_vgpu = kvmgt_attach_vgpu,
        .detach_vgpu = kvmgt_detach_vgpu,
        .inject_msi = kvmgt_inject_msi,
        .from_virt_to_mfn = kvmgt_virt_to_pfn,
        .set_wp_page = kvmgt_write_protect_add,
        .unset_wp_page = kvmgt_write_protect_remove,
        .read_gpa = kvmgt_read_gpa,
        .write_gpa = kvmgt_write_gpa,
        .gfn_to_mfn = kvmgt_gfn_to_pfn,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);
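
/*
 * kvmgt_mpt is the mediated-pass-through (MPT) hook table the GVT-g core
 * calls into for everything hypervisor-specific: host detection, mdev
 * registration, MSI injection, gfn/mfn translation, guest memory access and
 * write-protection of guest page-table pages.
 */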

static int __init kvmgt_init(void)
{
        return 0;
}

static void __exit kvmgt_exit(void)
{
}

module_init(kvmgt_init);
module_exit(kvmgt_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Intel Corporation");