/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 */
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/debugfs.h>

#include <linux/nospec.h>

#include "i915_drv.h"
#include "gvt.h"

static const struct intel_gvt_ops *intel_gvt_ops;
/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT   40
#define VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
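/*
 * Example of the encoding: a file offset of
 * VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_CONFIG_REGION_INDEX) + 0x10 selects
 * region index VFIO_PCI_CONFIG_REGION_INDEX with an in-region offset of
 * 0x10; the region index lives above bit 40 and the low 40 bits
 * (VFIO_PCI_OFFSET_MASK) carry the offset within that region.
 */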
#define EDID_BLOB_OFFSET (PAGE_SIZE/2)

#define OPREGION_SIGNATURE "IntelGraphicsMem"
struct vfio_region;
struct intel_vgpu_regops {
	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
			size_t count, loff_t *ppos, bool iswrite);
	void (*release)(struct intel_vgpu *vgpu,
			struct vfio_region *region);
};

struct vfio_region {
	u32				type;
	u32				subtype;
	size_t				size;
	u32				flags;
	const struct intel_vgpu_regops	*ops;
	void				*data;
};

struct vfio_edid_region {
	struct vfio_region_gfx_edid vfio_edid_regs;
	void *edid_blob;
};

struct kvmgt_pgfn {
	gfn_t gfn;
	struct hlist_node hnode;
};

struct kvmgt_guest_info {
	struct kvm *kvm;
	struct intel_vgpu *vgpu;
	struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
	struct hlist_head ptable[NR_BKT];
#undef NR_BKT
	struct dentry *debugfs_cache_entries;
};

struct gvt_dma {
	struct intel_vgpu *vgpu;
	struct rb_node gfn_node;
	struct rb_node dma_addr_node;
	gfn_t gfn;
	dma_addr_t dma_addr;
	unsigned long size;
	struct kref ref;
};

struct kvmgt_vdev {
	struct intel_vgpu *vgpu;
	struct mdev_device *mdev;
	struct vfio_region *region;
	int num_regions;
	struct eventfd_ctx *intx_trigger;
	struct eventfd_ctx *msi_trigger;

	/*
	 * Two caches are used to avoid mapping duplicated pages (e.g.
	 * scratch pages). This helps to reduce DMA setup overhead.
	 */
	struct rb_root gfn_cache;
	struct rb_root dma_addr_cache;
	unsigned long nr_cache_entries;
	struct mutex cache_lock;

	struct notifier_block iommu_notifier;
	struct notifier_block group_notifier;
	struct kvm *kvm;
	struct work_struct release_work;
	atomic_t released;

	struct vfio_device *vfio_device;
	struct vfio_group *vfio_group;
};
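/*
 * Note: gfn_cache and dma_addr_cache above index the same set of
 * struct gvt_dma entries, keyed by guest frame number and by DMA address
 * respectively, so a mapping can be looked up from either side. Both
 * trees and nr_cache_entries are protected by cache_lock.
 */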
static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
{
	return intel_vgpu_vdev(vgpu);
}
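/*
 * vgpu->handle stores a pointer to the per-guest kvmgt_guest_info once the
 * vGPU has been opened; any value with bits set above the low byte is
 * therefore treated as a valid handle.
 */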
static inline bool handle_valid(unsigned long handle)
{
	return !!(handle & ~0xff);
}
static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
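/*
 * Unpin a range of guest pages previously pinned with gvt_pin_guest_page().
 * Pages are unpinned one gfn at a time through the VFIO group, mirroring
 * the way they were pinned.
 */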
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	int total_pages;
	int npage;
	int ret;

	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;

	for (npage = 0; npage < total_pages; npage++) {
		unsigned long cur_gfn = gfn + npage;

		ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
		drm_WARN_ON(&i915->drm, ret != 1);
	}
}

/* Pin a normal or compound guest page for dma. */
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size, struct page **page)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	unsigned long base_pfn = 0;
	int total_pages;
	int npage;
	int ret;

	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
	/*
	 * We pin the pages one-by-one to avoid allocating a big array
	 * on stack to hold pfns.
	 */
	for (npage = 0; npage < total_pages; npage++) {
		unsigned long cur_gfn = gfn + npage;
		unsigned long pfn;

		ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
					   IOMMU_READ | IOMMU_WRITE, &pfn);
		if (ret != 1) {
			gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
				     cur_gfn, ret);
			goto err;
		}

		if (!pfn_valid(pfn)) {
			gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
			npage++;
			ret = -EFAULT;
			goto err;
		}

		if (npage == 0)
			base_pfn = pfn;
		else if (base_pfn + npage != pfn) {
			gvt_vgpu_err("The pages are not continuous\n");
			ret = -EINVAL;
			npage++;
			goto err;
		}
	}

	*page = pfn_to_page(base_pfn);
	return 0;
err:
	gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
	return ret;
}
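/*
 * Pin the guest page(s) backing @gfn and set up a DMA mapping of @size
 * bytes for device access; on mapping failure the pages are unpinned again.
 */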
static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t *dma_addr, unsigned long size)
{
	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
	struct page *page = NULL;
	int ret;

	ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
	if (ret)
		return ret;

	/* Setup DMA mapping. */
	*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr)) {
		gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
			     page_to_pfn(page), ret);
		gvt_unpin_guest_page(vgpu, gfn, size);
		return -ENOMEM;
	}

	return 0;
}

static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t dma_addr, unsigned long size)
{
	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;

	dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	gvt_unpin_guest_page(vgpu, gfn, size);
}
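/*
 * The helpers below walk the per-vGPU rb-tree caches: one keyed by DMA
 * address, the other by guest frame number. Callers are expected to hold
 * vdev->cache_lock.
 */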
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
		dma_addr_t dma_addr)
{
	struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			node = node->rb_left;
		else if (dma_addr > itr->dma_addr)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
		dma_addr_t dma_addr, unsigned long size)
{
	struct gvt_dma *new, *itr;
	struct rb_node **link, *parent = NULL;
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->vgpu = vgpu;
	new->gfn = gfn;
	new->dma_addr = dma_addr;
	new->size = size;
	kref_init(&new->ref);

	/* gfn_cache maps gfn to struct gvt_dma. */
	link = &vdev->gfn_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->gfn_node, parent, link);
	rb_insert_color(&new->gfn_node, &vdev->gfn_cache);

	/* dma_addr_cache maps dma addr to struct gvt_dma. */
	parent = NULL;
	link = &vdev->dma_addr_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->dma_addr_node, parent, link);
	rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache);

	vdev->nr_cache_entries++;
	return 0;
}
static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
		struct gvt_dma *entry)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

	rb_erase(&entry->gfn_node, &vdev->gfn_cache);
	rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache);
	kfree(entry);
	vdev->nr_cache_entries--;
}

static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
	struct gvt_dma *dma;
	struct rb_node *node = NULL;
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

	for (;;) {
		mutex_lock(&vdev->cache_lock);
		node = rb_first(&vdev->gfn_cache);
		if (!node) {
			mutex_unlock(&vdev->cache_lock);
			break;
		}
		dma = rb_entry(node, struct gvt_dma, gfn_node);
		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
		__gvt_cache_remove_entry(vgpu, dma);
		mutex_unlock(&vdev->cache_lock);
	}
}

static void gvt_cache_init(struct intel_vgpu *vgpu)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

	vdev->gfn_cache = RB_ROOT;
	vdev->dma_addr_cache = RB_ROOT;
	vdev->nr_cache_entries = 0;
	mutex_init(&vdev->cache_lock);
}
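/*
 * The protect table below is a hash table of write-protected gfns, used to
 * remember which guest pages have been registered with KVM's page-tracking
 * machinery (see kvmgt_page_track_add()/kvmgt_page_track_remove() further
 * down).
 */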
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
	hash_init(info->ptable);
}

static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
{
	struct kvmgt_pgfn *p;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p, *res = NULL;

	hash_for_each_possible(info->ptable, p, hnode, gfn) {
		if (gfn == p->gfn) {
			res = p;
			break;
		}
	}
	return res;
}

static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	return !!p;
}

static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	if (kvmgt_gfn_is_write_protected(info, gfn))
		return;

	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
	if (WARN(!p, "gfn: 0x%llx\n", gfn))
		return;

	p->gfn = gfn;
	hash_add(info->ptable, &p->hnode, gfn);
}

static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	if (p) {
		hash_del(&p->hnode);
		kfree(p);
	}
}
static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	void *base = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vdev->region[i].size || iswrite) {
		gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
		return -EINVAL;
	}
	count = min(count, (size_t)(vdev->region[i].size - pos));
	memcpy(buf, base + pos, count);

	return count;
}

static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
		struct vfio_region *region)
{
}

static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
	.rw = intel_vgpu_reg_rw_opregion,
	.release = intel_vgpu_reg_release_opregion,
};
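/*
 * The EDID region is laid out as a vfio_region_gfx_edid register block at
 * offset 0 followed by the EDID blob itself at EDID_BLOB_OFFSET
 * (PAGE_SIZE/2); accesses are dispatched to handle_edid_regs() or
 * handle_edid_blob() accordingly by intel_vgpu_reg_rw_edid().
 */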
static int handle_edid_regs(struct intel_vgpu *vgpu,
			struct vfio_edid_region *region, char *buf,
			size_t count, u16 offset, bool is_write)
{
	struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
	unsigned int data;

	if (offset + count > sizeof(*regs))
		return -EINVAL;

	if (count != 4)
		return -EINVAL;

	if (is_write) {
		data = *((unsigned int *)buf);
		switch (offset) {
		case offsetof(struct vfio_region_gfx_edid, link_state):
			if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
				if (!drm_edid_block_valid(
					(u8 *)region->edid_blob,
					0,
					true,
					NULL)) {
					gvt_vgpu_err("invalid EDID blob\n");
					return -EINVAL;
				}
				intel_gvt_ops->emulate_hotplug(vgpu, true);
			} else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
				intel_gvt_ops->emulate_hotplug(vgpu, false);
			else {
				gvt_vgpu_err("invalid EDID link state %d\n",
					regs->link_state);
				return -EINVAL;
			}
			regs->link_state = data;
			break;
		case offsetof(struct vfio_region_gfx_edid, edid_size):
			if (data > regs->edid_max_size) {
				gvt_vgpu_err("EDID size is bigger than %d!\n",
					regs->edid_max_size);
				return -EINVAL;
			}
			regs->edid_size = data;
			break;
		default:
			/* read-only regs */
			gvt_vgpu_err("write read-only EDID region at offset %d\n",
				offset);
			return -EPERM;
		}
	} else {
		memcpy(buf, (char *)regs + offset, count);
	}

	return count;
}
static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
			size_t count, u16 offset, bool is_write)
{
	if (offset + count > region->vfio_edid_regs.edid_size)
		return -EINVAL;

	if (is_write)
		memcpy(region->edid_blob + offset, buf, count);
	else
		memcpy(buf, region->edid_blob + offset, count);

	return count;
}
static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	int ret;
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	struct vfio_edid_region *region =
		(struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos < region->vfio_edid_regs.edid_offset) {
		ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
	} else {
		pos -= EDID_BLOB_OFFSET;
		ret = handle_edid_blob(region, buf, count, pos, iswrite);
	}

	if (ret < 0)
		gvt_vgpu_err("failed to access EDID region\n");

	return ret;
}

static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
		struct vfio_region *region)
{
	kfree(region->data);
}

static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
	.rw = intel_vgpu_reg_rw_edid,
	.release = intel_vgpu_reg_release_edid,
};
static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
		unsigned int type, unsigned int subtype,
		const struct intel_vgpu_regops *ops,
		size_t size, u32 flags, void *data)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	struct vfio_region *region;

	region = krealloc(vdev->region,
			(vdev->num_regions + 1) * sizeof(*region),
			GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vdev->region = region;
	vdev->region[vdev->num_regions].type = type;
	vdev->region[vdev->num_regions].subtype = subtype;
	vdev->region[vdev->num_regions].ops = ops;
	vdev->region[vdev->num_regions].size = size;
	vdev->region[vdev->num_regions].flags = flags;
	vdev->region[vdev->num_regions].data = data;
	vdev->num_regions++;
	return 0;
}

static int kvmgt_get_vfio_device(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

	vdev->vfio_device = vfio_device_get_from_dev(
		mdev_dev(vdev->mdev));
	if (!vdev->vfio_device) {
		gvt_vgpu_err("failed to get vfio device\n");
		return -ENODEV;
	}
	return 0;
}
static int kvmgt_set_opregion(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	void *base;
	int ret;

	/* Each vgpu has its own opregion, although VFIO would create another
	 * one later. This one is used to expose opregion to VFIO. And the
	 * other one created by VFIO later, is used by guest actually.
	 */
	base = vgpu_opregion(vgpu)->va;
	if (!base)
		return -ENOMEM;

	if (memcmp(base, OPREGION_SIGNATURE, 16))
		return -EINVAL;

	ret = intel_vgpu_register_reg(vgpu,
			PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
			&intel_vgpu_regops_opregion, OPREGION_SIZE,
			VFIO_REGION_INFO_FLAG_READ, base);

	return ret;
}

static int kvmgt_set_edid(void *p_vgpu, int port_num)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
	struct vfio_edid_region *base;
	int ret;

	base = kzalloc(sizeof(*base), GFP_KERNEL);
	if (!base)
		return -ENOMEM;

	/* TODO: Add multi-port and EDID extension block support */
	base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
	base->vfio_edid_regs.edid_max_size = EDID_SIZE;
	base->vfio_edid_regs.edid_size = EDID_SIZE;
	base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
	base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
	base->edid_blob = port->edid->edid_block;

	ret = intel_vgpu_register_reg(vgpu,
			VFIO_REGION_TYPE_GFX,
			VFIO_REGION_SUBTYPE_GFX_EDID,
			&intel_vgpu_regops_edid, EDID_SIZE,
			VFIO_REGION_INFO_FLAG_READ |
			VFIO_REGION_INFO_FLAG_WRITE |
			VFIO_REGION_INFO_FLAG_CAPS, base);

	return ret;
}

static void kvmgt_put_vfio_device(void *vgpu)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);

	if (WARN_ON(!vdev->vfio_device))
		return;

	vfio_device_put(vdev->vfio_device);
}
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = NULL;
	struct intel_vgpu_type *type;
	struct device *pdev;
	void *gvt;
	int ret;

	pdev = mdev_parent_dev(mdev);
	gvt = kdev_to_i915(pdev)->gvt;

	type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type) {
		gvt_vgpu_err("failed to find type %s to create\n",
						kobject_name(kobj));
		ret = -EINVAL;
		goto out;
	}

	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
	if (IS_ERR_OR_NULL(vgpu)) {
		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
		gvt_err("failed to create intel vgpu: %d\n", ret);
		goto out;
	}

	INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);

	kvmgt_vdev(vgpu)->mdev = mdev;
	mdev_set_drvdata(mdev, vgpu);

	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));
	ret = 0;

out:
	return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	if (handle_valid(vgpu->handle))
		return -EBUSY;

	intel_gvt_ops->vgpu_destroy(vgpu);
	return 0;
}
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct kvmgt_vdev *vdev = container_of(nb,
					       struct kvmgt_vdev,
					       iommu_notifier);
	struct intel_vgpu *vgpu = vdev->vgpu;

	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		struct gvt_dma *entry;
		unsigned long iov_pfn, end_iov_pfn;

		iov_pfn = unmap->iova >> PAGE_SHIFT;
		end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;

		mutex_lock(&vdev->cache_lock);
		for (; iov_pfn < end_iov_pfn; iov_pfn++) {
			entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
			if (!entry)
				continue;

			gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
					   entry->size);
			__gvt_cache_remove_entry(vgpu, entry);
		}
		mutex_unlock(&vdev->cache_lock);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct kvmgt_vdev *vdev = container_of(nb,
					       struct kvmgt_vdev,
					       group_notifier);

	/* the only action we care about */
	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
		vdev->kvm = data;

		if (!data)
			schedule_work(&vdev->release_work);
	}

	return NOTIFY_OK;
}
static int intel_vgpu_open(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	unsigned long events;
	int ret;
	struct vfio_group *vfio_group;

	vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
	vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;

	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
				&vdev->iommu_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
			ret);
		goto out;
	}

	events = VFIO_GROUP_NOTIFY_SET_KVM;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
				&vdev->group_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
			ret);
		goto undo_iommu;
	}

	vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
	if (IS_ERR_OR_NULL(vfio_group)) {
		ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
		gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
		goto undo_register;
	}
	vdev->vfio_group = vfio_group;

	/* Take a module reference as mdev core doesn't take
	 * a reference for vendor driver.
	 */
	if (!try_module_get(THIS_MODULE)) {
		ret = -ENODEV;
		goto undo_group;
	}

	ret = kvmgt_guest_init(mdev);
	if (ret)
		goto undo_group;

	intel_gvt_ops->vgpu_activate(vgpu);

	atomic_set(&vdev->released, 0);
	return ret;

undo_group:
	vfio_group_put_external_user(vdev->vfio_group);
	vdev->vfio_group = NULL;

undo_register:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
					&vdev->group_notifier);

undo_iommu:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
					&vdev->iommu_notifier);
out:
	return ret;
}

static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	struct eventfd_ctx *trigger;

	trigger = vdev->msi_trigger;
	if (trigger) {
		eventfd_ctx_put(trigger);
		vdev->msi_trigger = NULL;
	}
}

static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct kvmgt_guest_info *info;
	int ret;

	if (!handle_valid(vgpu->handle))
		return;

	if (atomic_cmpxchg(&vdev->released, 0, 1))
		return;

	intel_gvt_ops->vgpu_release(vgpu);

	ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
					&vdev->iommu_notifier);
	drm_WARN(&i915->drm, ret,
		 "vfio_unregister_notifier for iommu failed: %d\n", ret);

	ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY,
					&vdev->group_notifier);
	drm_WARN(&i915->drm, ret,
		 "vfio_unregister_notifier for group failed: %d\n", ret);

	/* dereference module reference taken at open */
	module_put(THIS_MODULE);

	info = (struct kvmgt_guest_info *)vgpu->handle;
	kvmgt_guest_exit(info);

	intel_vgpu_release_msi_eventfd_ctx(vgpu);
	vfio_group_put_external_user(vdev->vfio_group);

	vdev->kvm = NULL;
	vgpu->handle = 0;
}
static void intel_vgpu_release(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	__intel_vgpu_release(vgpu);
}

static void intel_vgpu_release_work(struct work_struct *work)
{
	struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev,
					       release_work);

	__intel_vgpu_release(vdev->vgpu);
}
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
	u32 start_lo, start_hi;
	u32 mem_type;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
						+ bar + 4));
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
	default:
		/* mem unknown type treated as 32-bit BAR */
		start_hi = 0;
		break;
	}

	return ((u64)start_hi << 32) | start_lo;
}

static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
			     void *buf, unsigned int count, bool is_write)
{
	u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
	int ret;

	if (is_write)
		ret = intel_gvt_ops->emulate_mmio_write(vgpu,
					bar_start + off, buf, count);
	else
		ret = intel_gvt_ops->emulate_mmio_read(vgpu,
					bar_start + off, buf, count);
	return ret;
}

static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
{
	return off >= vgpu_aperture_offset(vgpu) &&
	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
}

static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
		void *buf, unsigned long count, bool is_write)
{
	void __iomem *aperture_va;

	if (!intel_vgpu_in_aperture(vgpu, off) ||
	    !intel_vgpu_in_aperture(vgpu, off + count)) {
		gvt_vgpu_err("Invalid aperture offset %llu\n", off);
		return -EINVAL;
	}

	aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
					ALIGN_DOWN(off, PAGE_SIZE),
					count + offset_in_page(off));
	if (!aperture_va)
		return -EIO;

	if (is_write)
		memcpy_toio(aperture_va + offset_in_page(off), buf, count);
	else
		memcpy_fromio(buf, aperture_va + offset_in_page(off), count);

	io_mapping_unmap(aperture_va);

	return 0;
}
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
			size_t count, loff_t *ppos, bool is_write)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int ret = -EINVAL;

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) {
		gvt_vgpu_err("invalid index: %u\n", index);
		return -EINVAL;
	}

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		if (is_write)
			ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
						buf, count);
		else
			ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
						buf, count);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
					buf, count, is_write);
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:
		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
	case VFIO_PCI_BAR3_REGION_INDEX:
	case VFIO_PCI_BAR4_REGION_INDEX:
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:
		break;
	default:
		if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
			return -EINVAL;

		index -= VFIO_PCI_NUM_REGIONS;
		return vdev->region[index].ops->rw(vgpu, buf, count,
				ppos, is_write);
	}

	return ret == 0 ? count : ret;
}
static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct intel_gvt *gvt = vgpu->gvt;
	int offset;

	/* Only allow MMIO GGTT entry access */
	if (index != PCI_BASE_ADDRESS_0)
		return false;

	offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
		intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);

	return (offset >= gvt->device_info.gtt_start_offset &&
		offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
			true : false;
}
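/*
 * intel_vgpu_read()/intel_vgpu_write() below split user accesses into
 * naturally aligned 8/4/2/1-byte chunks; 8-byte accesses are only allowed
 * for GGTT entries (see gtt_entry() above).
 */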
static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		/* Only support GGTT entry 8 bytes read */
		if (count >= 8 && !(*ppos % 8) &&
			gtt_entry(mdev, ppos)) {
			u64 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 8;
		} else if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
					false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}
intel_vgpu_write(struct mdev_device
*mdev
,
1144 const char __user
*buf
,
1145 size_t count
, loff_t
*ppos
)
1147 unsigned int done
= 0;
1153 /* Only support GGTT entry 8 bytes write */
1154 if (count
>= 8 && !(*ppos
% 8) &&
1155 gtt_entry(mdev
, ppos
)) {
1158 if (copy_from_user(&val
, buf
, sizeof(val
)))
1161 ret
= intel_vgpu_rw(mdev
, (char *)&val
, sizeof(val
),
1167 } else if (count
>= 4 && !(*ppos
% 4)) {
1170 if (copy_from_user(&val
, buf
, sizeof(val
)))
1173 ret
= intel_vgpu_rw(mdev
, (char *)&val
, sizeof(val
),
1179 } else if (count
>= 2 && !(*ppos
% 2)) {
1182 if (copy_from_user(&val
, buf
, sizeof(val
)))
1185 ret
= intel_vgpu_rw(mdev
, (char *)&val
,
1186 sizeof(val
), ppos
, true);
1194 if (copy_from_user(&val
, buf
, sizeof(val
)))
1197 ret
= intel_vgpu_rw(mdev
, &val
, sizeof(val
),
1216 static int intel_vgpu_mmap(struct mdev_device
*mdev
, struct vm_area_struct
*vma
)
1220 unsigned long req_size
, pgoff
, req_start
;
1222 struct intel_vgpu
*vgpu
= mdev_get_drvdata(mdev
);
1224 index
= vma
->vm_pgoff
>> (VFIO_PCI_OFFSET_SHIFT
- PAGE_SHIFT
);
1225 if (index
>= VFIO_PCI_ROM_REGION_INDEX
)
1228 if (vma
->vm_end
< vma
->vm_start
)
1230 if ((vma
->vm_flags
& VM_SHARED
) == 0)
1232 if (index
!= VFIO_PCI_BAR2_REGION_INDEX
)
1235 pg_prot
= vma
->vm_page_prot
;
1236 virtaddr
= vma
->vm_start
;
1237 req_size
= vma
->vm_end
- vma
->vm_start
;
1238 pgoff
= vma
->vm_pgoff
&
1239 ((1U << (VFIO_PCI_OFFSET_SHIFT
- PAGE_SHIFT
)) - 1);
1240 req_start
= pgoff
<< PAGE_SHIFT
;
1242 if (!intel_vgpu_in_aperture(vgpu
, req_start
))
1244 if (req_start
+ req_size
>
1245 vgpu_aperture_offset(vgpu
) + vgpu_aperture_sz(vgpu
))
1248 pgoff
= (gvt_aperture_pa_base(vgpu
->gvt
) >> PAGE_SHIFT
) + pgoff
;
1250 return remap_pfn_range(vma
, virtaddr
, pgoff
, req_size
, pg_prot
);
1253 static int intel_vgpu_get_irq_count(struct intel_vgpu
*vgpu
, int type
)
1255 if (type
== VFIO_PCI_INTX_IRQ_INDEX
|| type
== VFIO_PCI_MSI_IRQ_INDEX
)
1261 static int intel_vgpu_set_intx_mask(struct intel_vgpu
*vgpu
,
1262 unsigned int index
, unsigned int start
,
1263 unsigned int count
, u32 flags
,
1269 static int intel_vgpu_set_intx_unmask(struct intel_vgpu
*vgpu
,
1270 unsigned int index
, unsigned int start
,
1271 unsigned int count
, u32 flags
, void *data
)
1276 static int intel_vgpu_set_intx_trigger(struct intel_vgpu
*vgpu
,
1277 unsigned int index
, unsigned int start
, unsigned int count
,
1278 u32 flags
, void *data
)
static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		u32 flags, void *data)
{
	struct eventfd_ctx *trigger;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int fd = *(int *)data;

		trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(trigger)) {
			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
			return PTR_ERR(trigger);
		}
		kvmgt_vdev(vgpu)->msi_trigger = trigger;
	} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
		intel_vgpu_release_msi_eventfd_ctx(vgpu);

	return 0;
}

static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
		unsigned int index, unsigned int start, unsigned int count,
		void *data)
{
	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
			unsigned int start, unsigned int count, u32 flags,
			void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = intel_vgpu_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = intel_vgpu_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vgpu, index, start, count, flags, data);
}
static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
			     unsigned long arg)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	unsigned long minsz;

	gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;
		info.flags |= VFIO_DEVICE_FLAGS_RESET;
		info.num_regions = VFIO_PCI_NUM_REGIONS +
				vdev->num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		unsigned int i;
		int ret;
		struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
		int nr_areas = 1;
		int cap_type_id;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->gvt->device_info.cfg_space_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->cfg_space.bar[info.index].size;
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR1_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;
			break;
		case VFIO_PCI_BAR2_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = VFIO_REGION_INFO_FLAG_CAPS |
					VFIO_REGION_INFO_FLAG_MMAP |
					VFIO_REGION_INFO_FLAG_READ |
					VFIO_REGION_INFO_FLAG_WRITE;
			info.size = gvt_aperture_sz(vgpu->gvt);

			sparse = kzalloc(struct_size(sparse, areas, nr_areas),
					 GFP_KERNEL);
			if (!sparse)
				return -ENOMEM;

			sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->header.version = 1;
			sparse->nr_areas = nr_areas;
			cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->areas[0].offset =
					PAGE_ALIGN(vgpu_aperture_offset(vgpu));
			sparse->areas[0].size = vgpu_aperture_sz(vgpu);
			break;

		case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;

			gvt_dbg_core("get region info bar:%d\n", info.index);
			break;

		case VFIO_PCI_ROM_REGION_INDEX:
		case VFIO_PCI_VGA_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;

			gvt_dbg_core("get region info index:%d\n", info.index);
			break;
		default:
			{
				struct vfio_region_info_cap_type cap_type = {
					.header.id = VFIO_REGION_INFO_CAP_TYPE,
					.header.version = 1 };

				if (info.index >= VFIO_PCI_NUM_REGIONS +
						vdev->num_regions)
					return -EINVAL;
				info.index =
					array_index_nospec(info.index,
							VFIO_PCI_NUM_REGIONS +
							vdev->num_regions);

				i = info.index - VFIO_PCI_NUM_REGIONS;

				info.offset =
					VFIO_PCI_INDEX_TO_OFFSET(info.index);
				info.size = vdev->region[i].size;
				info.flags = vdev->region[i].flags;

				cap_type.type = vdev->region[i].type;
				cap_type.subtype = vdev->region[i].subtype;

				ret = vfio_info_add_capability(&caps,
							&cap_type.header,
							sizeof(cap_type));
				if (ret)
					return ret;
			}
		}

		if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
			switch (cap_type_id) {
			case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
				ret = vfio_info_add_capability(&caps,
					&sparse->header,
					struct_size(sparse, areas,
						    sparse->nr_areas));
				if (ret) {
					kfree(sparse);
					return ret;
				}
				break;
			default:
				kfree(sparse);
				return -EINVAL;
			}
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					kfree(sparse);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		kfree(sparse);
		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX:
		case VFIO_PCI_MSI_IRQ_INDEX:
			break;
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = intel_vgpu_get_irq_count(vgpu, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			int max = intel_vgpu_get_irq_count(vgpu, hdr.index);

			ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
			if (ret) {
				gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
				return -EINVAL;
			}
			if (data_size) {
				data = memdup_user((void __user *)(arg + minsz),
						   data_size);
				if (IS_ERR(data))
					return PTR_ERR(data);
			}
		}

		ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
					hdr.start, hdr.count, data);
		kfree(data);

		return ret;
	} else if (cmd == VFIO_DEVICE_RESET) {
		intel_gvt_ops->vgpu_reset(vgpu);
		return 0;
	} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
		struct vfio_device_gfx_plane_info dmabuf;
		int ret = 0;

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    dmabuf_id);
		if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
			return -EFAULT;
		if (dmabuf.argsz < minsz)
			return -EINVAL;

		ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
		if (ret != 0)
			return ret;

		return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
								-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
		__u32 dmabuf_id;
		__s32 dmabuf_fd;

		if (get_user(dmabuf_id, (__u32 __user *)arg))
			return -EFAULT;

		dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
		return dmabuf_fd;
	}

	return -ENOTTY;
}
static ssize_t
vgpu_id_show(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);

	if (mdev) {
		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%d\n", vgpu->id);
	}
	return sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(vgpu_id);
static struct attribute *intel_vgpu_attrs[] = {
	&dev_attr_vgpu_id.attr,
	NULL
};

static const struct attribute_group intel_vgpu_group = {
	.name  = "intel_vgpu",
	.attrs = intel_vgpu_attrs,
};

static const struct attribute_group *intel_vgpu_groups[] = {
	&intel_vgpu_group,
	NULL,
};

static struct mdev_parent_ops intel_vgpu_ops = {
	.mdev_attr_groups       = intel_vgpu_groups,
	.create			= intel_vgpu_create,
	.remove			= intel_vgpu_remove,

	.open			= intel_vgpu_open,
	.release		= intel_vgpu_release,

	.read			= intel_vgpu_read,
	.write			= intel_vgpu_write,
	.mmap			= intel_vgpu_mmap,
	.ioctl			= intel_vgpu_ioctl,
};
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
	struct attribute_group **kvm_vgpu_type_groups;

	intel_gvt_ops = ops;
	if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
		return -EFAULT;
	intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;

	return mdev_register_device(dev, &intel_vgpu_ops);
}

static void kvmgt_host_exit(struct device *dev)
{
	mdev_unregister_device(dev);
}
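/*
 * kvmgt_page_track_add()/kvmgt_page_track_remove() ask KVM to write-protect
 * (or unprotect) a guest page and mirror that state in the local protect
 * table, so that guest writes to tracked pages are delivered to
 * kvmgt_page_track_write() below.
 */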
static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_add(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}
static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return 0;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (!kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_del(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}
static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		const u8 *val, int len,
		struct kvm_page_track_notifier_node *node)
{
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
		intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
						     (void *)val, len);
}

static void kvmgt_page_track_flush_slot(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		struct kvm_page_track_notifier_node *node)
{
	int i;
	gfn_t gfn;
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i < slot->npages; i++) {
		gfn = slot->base_gfn + i;
		if (kvmgt_gfn_is_write_protected(info, gfn)) {
			kvm_slot_page_track_remove_page(kvm, slot, gfn,
						KVM_PAGE_TRACK_WRITE);
			kvmgt_protect_table_del(info, gfn);
		}
	}
	spin_unlock(&kvm->mmu_lock);
}
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
{
	struct intel_vgpu *itr;
	struct kvmgt_guest_info *info;
	int id;
	bool ret = false;

	mutex_lock(&vgpu->gvt->lock);
	for_each_active_vgpu(vgpu->gvt, itr, id) {
		if (!handle_valid(itr->handle))
			continue;

		info = (struct kvmgt_guest_info *)itr->handle;
		if (kvm && kvm == info->kvm) {
			ret = true;
			goto out;
		}
	}
out:
	mutex_unlock(&vgpu->gvt->lock);
	return ret;
}
static int kvmgt_guest_init(struct mdev_device *mdev)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;
	struct kvmgt_vdev *vdev;
	struct kvm *kvm;

	vgpu = mdev_get_drvdata(mdev);
	if (handle_valid(vgpu->handle))
		return -EEXIST;

	vdev = kvmgt_vdev(vgpu);
	kvm = vdev->kvm;
	if (!kvm || kvm->mm != current->mm) {
		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
		return -ESRCH;
	}

	if (__kvmgt_vgpu_exist(vgpu, kvm))
		return -EEXIST;

	info = vzalloc(sizeof(struct kvmgt_guest_info));
	if (!info)
		return -ENOMEM;

	vgpu->handle = (unsigned long)info;
	info->vgpu = vgpu;
	info->kvm = kvm;
	kvm_get_kvm(info->kvm);

	kvmgt_protect_table_init(info);
	gvt_cache_init(vgpu);

	info->track_node.track_write = kvmgt_page_track_write;
	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
	kvm_page_track_register_notifier(kvm, &info->track_node);

	info->debugfs_cache_entries = debugfs_create_ulong(
						"kvmgt_nr_cache_entries",
						0444, vgpu->debugfs,
						&vdev->nr_cache_entries);
	return 0;
}

static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
	debugfs_remove(info->debugfs_cache_entries);

	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
	kvm_put_kvm(info->kvm);
	kvmgt_protect_table_destroy(info);
	gvt_cache_destroy(info->vgpu);
	vfree(info);

	return true;
}
static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;

	vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL);
	if (!vgpu->vdev)
		return -ENOMEM;

	kvmgt_vdev(vgpu)->vgpu = vgpu;

	return 0;
}

static void kvmgt_detach_vgpu(void *p_vgpu)
{
	int i;
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

	if (!vdev->region)
		return;

	for (i = 0; i < vdev->num_regions; i++)
		if (vdev->region[i].ops->release)
			vdev->region[i].ops->release(vgpu,
					&vdev->region[i]);
	vdev->num_regions = 0;
	kfree(vdev->region);
	vdev->region = NULL;

	kfree(vdev);
}
static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;
	struct kvmgt_vdev *vdev;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	vgpu = info->vgpu;
	vdev = kvmgt_vdev(vgpu);

	/*
	 * When the guest is powered off, msi_trigger is set to NULL, but the
	 * vgpu's config and mmio registers are not restored to defaults
	 * during guest poweroff. If this vgpu is reused by the next VM, its
	 * pipes may still be enabled, so once the vgpu becomes active it can
	 * receive vblank interrupt requests. msi_trigger stays NULL until MSI
	 * is enabled by the guest, so when msi_trigger is NULL, return
	 * success and do not inject an interrupt into the guest.
	 */
	if (vdev->msi_trigger == NULL)
		return 0;

	if (eventfd_signal(vdev->msi_trigger, 1) == 1)
		return 0;

	return -EFAULT;
}
static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
	struct kvmgt_guest_info *info;
	kvm_pfn_t pfn;

	if (!handle_valid(handle))
		return INTEL_GVT_INVALID_ADDR;

	info = (struct kvmgt_guest_info *)handle;

	pfn = gfn_to_pfn(info->kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		return INTEL_GVT_INVALID_ADDR;

	return pfn;
}
static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
		unsigned long size, dma_addr_t *dma_addr)
{
	struct intel_vgpu *vgpu;
	struct kvmgt_vdev *vdev;
	struct gvt_dma *entry;
	int ret;

	if (!handle_valid(handle))
		return -EINVAL;

	vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
	vdev = kvmgt_vdev(vgpu);

	mutex_lock(&vdev->cache_lock);

	entry = __gvt_cache_find_gfn(vgpu, gfn);
	if (!entry) {
		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
		if (ret)
			goto err_unlock;

		ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
		if (ret)
			goto err_unmap;
	} else if (entry->size != size) {
		/* the same gfn with different size: unmap and re-map */
		gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
		__gvt_cache_remove_entry(vgpu, entry);

		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
		if (ret)
			goto err_unlock;

		ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
		if (ret)
			goto err_unmap;
	} else {
		kref_get(&entry->ref);
		*dma_addr = entry->dma_addr;
	}

	mutex_unlock(&vdev->cache_lock);
	return 0;

err_unmap:
	gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
	mutex_unlock(&vdev->cache_lock);
	return ret;
}

static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
	struct kvmgt_guest_info *info;
	struct kvmgt_vdev *vdev;
	struct gvt_dma *entry;
	int ret = 0;

	if (!handle_valid(handle))
		return -ENODEV;

	info = (struct kvmgt_guest_info *)handle;
	vdev = kvmgt_vdev(info->vgpu);

	mutex_lock(&vdev->cache_lock);
	entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
	if (entry)
		kref_get(&entry->ref);
	else
		ret = -ENOMEM;
	mutex_unlock(&vdev->cache_lock);

	return ret;
}
static void __gvt_dma_release(struct kref *ref)
{
	struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);

	gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
			   entry->size);
	__gvt_cache_remove_entry(entry->vgpu, entry);
}

static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
	struct intel_vgpu *vgpu;
	struct kvmgt_vdev *vdev;
	struct gvt_dma *entry;

	if (!handle_valid(handle))
		return;

	vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
	vdev = kvmgt_vdev(vgpu);

	mutex_lock(&vdev->cache_lock);
	entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
	if (entry)
		kref_put(&entry->ref, __gvt_dma_release);
	mutex_unlock(&vdev->cache_lock);
}
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len, bool write)
{
	struct kvmgt_guest_info *info;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;

	return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
			   gpa, buf, len, write);
}
static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, false);
}

static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, true);
}

static unsigned long kvmgt_virt_to_pfn(void *addr)
{
	return PFN_DOWN(__pa(addr));
}
static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	int idx;
	bool ret;

	if (!handle_valid(handle))
		return false;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	ret = kvm_is_visible_gfn(kvm, gfn);
	srcu_read_unlock(&kvm->srcu, idx);

	return ret;
}
static const struct intel_gvt_mpt kvmgt_mpt = {
	.type = INTEL_GVT_HYPERVISOR_KVM,
	.host_init = kvmgt_host_init,
	.host_exit = kvmgt_host_exit,
	.attach_vgpu = kvmgt_attach_vgpu,
	.detach_vgpu = kvmgt_detach_vgpu,
	.inject_msi = kvmgt_inject_msi,
	.from_virt_to_mfn = kvmgt_virt_to_pfn,
	.enable_page_track = kvmgt_page_track_add,
	.disable_page_track = kvmgt_page_track_remove,
	.read_gpa = kvmgt_read_gpa,
	.write_gpa = kvmgt_write_gpa,
	.gfn_to_mfn = kvmgt_gfn_to_pfn,
	.dma_map_guest_page = kvmgt_dma_map_guest_page,
	.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
	.dma_pin_guest_page = kvmgt_dma_pin_guest_page,
	.set_opregion = kvmgt_set_opregion,
	.set_edid = kvmgt_set_edid,
	.get_vfio_device = kvmgt_get_vfio_device,
	.put_vfio_device = kvmgt_put_vfio_device,
	.is_valid_gfn = kvmgt_is_valid_gfn,
};
static int __init kvmgt_init(void)
{
	if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
		return -ENODEV;
	return 0;
}

static void __exit kvmgt_exit(void)
{
	intel_gvt_unregister_hypervisor();
}

module_init(kvmgt_init);
module_exit(kvmgt_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Intel Corporation");