/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>

#include "vfio_pci_private.h"

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "VFIO PCI - User Level meta-driver"
static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");
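/*
 * Example (illustrative IDs only, not a recommendation): loading with
 * "modprobe vfio-pci ids=8086:10fb,15b3:1013" or booting with
 * "vfio-pci.ids=8086:10fb" adds those vendor:device pairs at module init;
 * the buffer is __initdata, so it is only parsed at load time.
 */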
static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
		  "Disable support for PCI 2.3 style INTx masking.  If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");
#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif
static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
		 "Disable using the PCI D3 low power state for idle, unused devices");

static DEFINE_MUTEX(driver_lock);
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
	return disable_vga;
#else
	return true;
#endif
}
/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself.  However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
	unsigned char max_busnr;
	unsigned int decodes;

	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}
static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
	return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}
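/*
 * Decide, per standard BAR, whether userspace may mmap it.  Page-sized or
 * larger MEM BARs qualify directly; a smaller BAR qualifies only when it is
 * page aligned and the remainder of its page can be claimed with a dummy
 * resource so that nothing else is ever assigned into that page.
 */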
static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
	struct resource *res;
	int bar;
	struct vfio_pci_dummy_resource *dummy_res;

	INIT_LIST_HEAD(&vdev->dummy_resources_list);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		res = vdev->pdev->resource + bar;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
			goto no_mmap;

		if (!(res->flags & IORESOURCE_MEM))
			goto no_mmap;

		/*
		 * The PCI core shouldn't set up a resource with a
		 * type but zero size.  But there may be bugs that
		 * cause us to do that.
		 */
		if (!resource_size(res))
			goto no_mmap;

		if (resource_size(res) >= PAGE_SIZE) {
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}

		if (!(res->start & ~PAGE_MASK)) {
			/*
			 * Add a dummy resource to reserve the remainder
			 * of the exclusive page in case that hot-add
			 * device's bar is assigned into it.
			 */
			dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
			if (dummy_res == NULL)
				goto no_mmap;

			dummy_res->resource.name = "vfio sub-page reserved";
			dummy_res->resource.start = res->end + 1;
			dummy_res->resource.end = res->start + PAGE_SIZE - 1;
			dummy_res->resource.flags = res->flags;
			if (request_resource(res->parent,
						&dummy_res->resource)) {
				kfree(dummy_res);
				goto no_mmap;
			}
			dummy_res->index = bar;
			list_add(&dummy_res->res_next,
					&vdev->dummy_resources_list);
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}
		/*
		 * Here we don't handle the case when the BAR is not page
		 * aligned because we can't expect the BAR will be
		 * assigned into the same location in a page in guest
		 * when we passthrough the BAR. And it's hard to access
		 * this BAR in userspace because we have no way to get
		 * the BAR's location in a page.
		 */
no_mmap:
		vdev->bar_mmap_supported[bar] = false;
	}
}
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);
/*
 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
 * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
 * If a device implements the former but not the latter we would typically
 * expect broken_intx_masking to be set and require an exclusive interrupt.
 * However, since we do have control of the device's ability to assert INTx,
 * we can instead pretend that the device does not implement INTx, virtualizing
 * the pin register to report zero and maintaining DisINTx set on the host.
 */
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		/* All i40e (XL710/X710) 10/20/40GbE NICs */
		case 0x1580 ... 0x1581:
		case 0x1583 ... 0x1589:
		case 0x37d0 ... 0x37d2:
			return true;
		default:
			return false;
		}
	}

	return false;
}
static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	pci_set_power_state(pdev, PCI_D0);

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	vdev->reset_works = (pci_reset_function(pdev) == 0);
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pr_debug("%s: Couldn't store %s saved state\n",
			 __func__, dev_name(&pdev->dev));

	if (likely(!nointxmask)) {
		if (vfio_pci_nointx(pdev)) {
			dev_info(&pdev->dev, "Masking broken INTx support\n");
			vdev->nointx = true;
			pci_intx(pdev, 0);
		} else
			vdev->pci_2_3 = pci_intx_mask_supported(pdev);
	}

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	ret = vfio_config_init(vdev);
	if (ret) {
		kfree(vdev->pci_saved_state);
		vdev->pci_saved_state = NULL;
		pci_disable_device(pdev);
		return ret;
	}

	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
	} else
		vdev->msix_bar = 0xFF;

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	if (vfio_pci_is_vga(pdev) &&
	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
		ret = vfio_pci_igd_init(vdev);
		if (ret) {
			dev_warn(&vdev->pdev->dev,
				 "Failed to setup Intel IGD regions\n");
			vfio_pci_disable(vdev);
			return ret;
		}
	}

	vfio_pci_probe_mmaps(vdev);

	return 0;
}
static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_dummy_resource *dummy_res, *tmp;
	int i, bar;

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	vdev->virq_disabled = false;

	for (i = 0; i < vdev->num_regions; i++)
		vdev->region[i].ops->release(vdev, &vdev->region[i]);

	vdev->num_regions = 0;
	kfree(vdev->region);
	vdev->region = NULL; /* don't krealloc a freed pointer */

	vfio_config_free(vdev);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	list_for_each_entry_safe(dummy_res, tmp,
				 &vdev->dummy_resources_list, res_next) {
		list_del(&dummy_res->res_next);
		release_resource(&dummy_res->resource);
		kfree(dummy_res);
	}

	vdev->needs_reset = true;

	/*
	 * If we have saved state, restore it.  If we can reset the device,
	 * even better.  Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pr_info("%s: Couldn't reload %s saved state\n",
			__func__, dev_name(&pdev->dev));

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset.  Stolen from pci_reset_function()
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to reset the device.  The success of this is dependent on
	 * being able to lock the device, which is not always possible.
	 */
	if (vdev->reset_works && !pci_try_reset_function(pdev))
		vdev->needs_reset = false;

	pci_restore_state(pdev);
out:
	pci_disable_device(pdev);

	vfio_pci_try_bus_reset(vdev);

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D3hot);
}
static void vfio_pci_release(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		vfio_spapr_pci_eeh_release(vdev->pdev);
		vfio_pci_disable(vdev);
	}

	mutex_unlock(&driver_lock);

	module_put(THIS_MODULE);
}
static int vfio_pci_open(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_pci_enable(vdev);
		if (ret)
			goto error;

		vfio_spapr_pci_eeh_open(vdev->pdev);
	}
	vdev->refcnt++;
error:
	mutex_unlock(&driver_lock);
	if (ret)
		module_put(THIS_MODULE);
	return ret;
}
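/*
 * Interrupt counts reported to userspace: INTx is 1 when a pin is wired (and
 * not hidden via nointx), MSI is derived from the Multiple Message Capable
 * field (1 << MMC), MSI-X is Table Size + 1, and ERR/REQ are single virtual
 * interrupts.
 */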
static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;

		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
		if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin)
			return 1;

	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);

			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}
static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}
struct vfio_pci_fill_info {
	int max;
	int cur;
	struct vfio_pci_dependent_device *devices;
};
static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_fill_info *fill = data;
	struct iommu_group *iommu_group;

	if (fill->cur == fill->max)
		return -EAGAIN; /* Something changed, try again */

	iommu_group = iommu_group_get(&pdev->dev);
	if (!iommu_group)
		return -EPERM; /* Cannot reset non-isolated devices */

	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
	fill->devices[fill->cur].bus = pdev->bus->number;
	fill->devices[fill->cur].devfn = pdev->devfn;
	fill->cur++;
	iommu_group_put(iommu_group);
	return 0;
}
struct vfio_pci_group_entry {
	struct vfio_group *group;
	int id;
};

struct vfio_pci_group_info {
	int count;
	struct vfio_pci_group_entry *groups;
};
static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_group_info *info = data;
	struct iommu_group *group;
	int id, i;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EPERM;

	id = iommu_group_id(group);

	for (i = 0; i < info->count; i++)
		if (info->groups[i].id == id)
			break;

	iommu_group_put(group);

	return (i == info->count) ? -EINVAL : 0;
}
static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	for (; pdev; pdev = pdev->bus->self)
		if (pdev->bus == slot->bus)
			return (pdev->slot == slot);

	return false;
}
struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *, void *data);
	void *data;
	struct pci_dev *pdev;
	bool slot;
	int ret;
};

static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		walk->ret = walk->fn(pdev, walk->data);

	return walk->ret;
}
static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk = {
		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
	};

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}
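/*
 * Sparse mmap layout, illustrative numbers assuming 4K pages: an MSI-X table
 * at offset 0x3000 of an 0x8000 byte BAR yields two mmap-able areas,
 * [0x0000, 0x3000) below the table page and [0x4000, 0x8000) above it.
 */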
static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev,
				struct vfio_info_cap *caps)
{
	struct vfio_info_cap_header *header;
	struct vfio_region_info_cap_sparse_mmap *sparse;
	size_t end, size;
	int nr_areas = 2, i = 0;

	end = pci_resource_len(vdev->pdev, vdev->msix_bar);

	/* If MSI-X table is aligned to the start or end, only one area */
	if (((vdev->msix_offset & PAGE_MASK) == 0) ||
	    (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) >= end))
		nr_areas = 1;

	size = sizeof(*sparse) + (nr_areas * sizeof(*sparse->areas));

	header = vfio_info_cap_add(caps, size,
				   VFIO_REGION_INFO_CAP_SPARSE_MMAP, 1);
	if (IS_ERR(header))
		return PTR_ERR(header);

	sparse = container_of(header,
			      struct vfio_region_info_cap_sparse_mmap, header);
	sparse->nr_areas = nr_areas;

	if (vdev->msix_offset & PAGE_MASK) {
		sparse->areas[i].offset = 0;
		sparse->areas[i].size = vdev->msix_offset & PAGE_MASK;
		i++;
	}

	if (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) < end) {
		sparse->areas[i].offset = PAGE_ALIGN(vdev->msix_offset +
						     vdev->msix_size);
		sparse->areas[i].size = end - sparse->areas[i].offset;
		i++;
	}

	return 0;
}
static int region_type_cap(struct vfio_pci_device *vdev,
			   struct vfio_info_cap *caps,
			   unsigned int type, unsigned int subtype)
{
	struct vfio_info_cap_header *header;
	struct vfio_region_info_cap_type *cap;

	header = vfio_info_cap_add(caps, sizeof(*cap),
				   VFIO_REGION_INFO_CAP_TYPE, 1);
	if (IS_ERR(header))
		return PTR_ERR(header);

	cap = container_of(header, struct vfio_region_info_cap_type, header);
	cap->type = type;
	cap->subtype = subtype;

	return 0;
}
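/*
 * Device-specific code (for example the Intel IGD support set up in
 * vfio_pci_enable()) uses this to expose extra regions beyond the standard
 * config/BAR/ROM/VGA set; their indices start at VFIO_PCI_NUM_REGIONS.
 */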
int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
				 unsigned int type, unsigned int subtype,
				 const struct vfio_pci_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_pci_region *region;

	region = krealloc(vdev->region,
			  (vdev->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vdev->region = region;
	vdev->region[vdev->num_regions].type = type;
	vdev->region[vdev->num_regions].subtype = subtype;
	vdev->region[vdev->num_regions].ops = ops;
	vdev->region[vdev->num_regions].size = size;
	vdev->region[vdev->num_regions].flags = flags;
	vdev->region[vdev->num_regions].data = data;

	vdev->num_regions++;

	return 0;
}
static long vfio_pci_ioctl(void *device_data,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_pci_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;

		if (vdev->reset_works)
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pdev->cfg_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			if (vdev->bar_mmap_supported[info.index]) {
				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
				if (info.index == vdev->msix_bar) {
					ret = msix_sparse_mmap_cap(vdev, &caps);
					if (ret)
						return ret;
				}
			}
			break;
		case VFIO_PCI_ROM_REGION_INDEX:
		{
			void __iomem *io;
			size_t size;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = 0;

			/* Report the BAR size, not the ROM size */
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				/* Shadow ROMs appear as PCI option ROMs */
				if (pdev->resource[PCI_ROM_RESOURCE].flags &
							IORESOURCE_ROM_SHADOW)
					info.size = 0x20000;
				else
					break;
			}

			/* Is it really there? */
			io = pci_map_rom(pdev, &size);
			if (!io || !size) {
				info.size = 0;
				break;
			}
			pci_unmap_rom(pdev, io);

			info.flags = VFIO_REGION_INFO_FLAG_READ;
			break;
		}
		case VFIO_PCI_VGA_REGION_INDEX:
			if (!vdev->has_vga)
				return -EINVAL;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0xc0000;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

			break;
		default:
			if (info.index >=
			    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
				return -EINVAL;
			info.index = array_index_nospec(info.index,
							VFIO_PCI_NUM_REGIONS +
							vdev->num_regions);

			i = info.index - VFIO_PCI_NUM_REGIONS;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vdev->region[i].size;
			info.flags = vdev->region[i].flags;

			ret = region_type_cap(vdev, &caps,
					      vdev->region[i].type,
					      vdev->region[i].subtype);
			if (ret)
				return ret;
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						 sizeof(info), caps.buf,
						 caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
		case VFIO_PCI_REQ_IRQ_INDEX:
			break;
		case VFIO_PCI_ERR_IRQ_INDEX:
			if (pci_is_pcie(vdev->pdev))
				break;
			/* pass thru to return error */
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = vfio_pci_get_irq_count(vdev, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		size_t size;
		u8 *data = NULL;
		int max, ret = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
		    hdr.count >= (U32_MAX - hdr.start) ||
		    hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
			return -EINVAL;

		max = vfio_pci_get_irq_count(vdev, hdr.index);
		if (hdr.start >= max || hdr.start + hdr.count > max)
			return -EINVAL;

		switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
		case VFIO_IRQ_SET_DATA_NONE:
			size = 0;
			break;
		case VFIO_IRQ_SET_DATA_BOOL:
			size = sizeof(uint8_t);
			break;
		case VFIO_IRQ_SET_DATA_EVENTFD:
			size = sizeof(int32_t);
			break;
		default:
			return -EINVAL;
		}

		if (size) {
			if (hdr.argsz - minsz < hdr.count * size)
				return -EINVAL;

			data = memdup_user((void __user *)(arg + minsz),
					   hdr.count * size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
					      hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		return vdev->reset_works ?
			pci_try_reset_function(vdev->pdev) : -EINVAL;

	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
		struct vfio_pci_hot_reset_info hdr;
		struct vfio_pci_fill_info fill = { 0 };
		struct vfio_pci_dependent_device *devices = NULL;
		bool slot = false;
		int ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/* How many devices are affected? */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &fill.max, slot);
		if (ret)
			return ret;

		WARN_ON(!fill.max); /* Should always be at least one */

		/*
		 * If there's enough space, fill it now, otherwise return
		 * -ENOSPC and the number of devices affected.
		 */
		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
			ret = -ENOSPC;
			hdr.count = fill.max;
			goto reset_info_exit;
		}

		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
		if (!devices)
			return -ENOMEM;

		fill.devices = devices;

		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_fill_devs,
						    &fill, slot);

		/*
		 * If a device was removed between counting and filling,
		 * we may come up short of fill.max.  If a device was
		 * added, we'll have a return of -EAGAIN above.
		 */
		if (!ret)
			hdr.count = fill.cur;

reset_info_exit:
		if (copy_to_user((void __user *)arg, &hdr, minsz))
			ret = -EFAULT;

		if (!ret) {
			if (copy_to_user((void __user *)(arg + minsz), devices,
					 hdr.count * sizeof(*devices)))
				ret = -EFAULT;
		}

		kfree(devices);
		return ret;

	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
		struct vfio_pci_hot_reset hdr;
		int32_t *group_fds;
		struct vfio_pci_group_entry *groups;
		struct vfio_pci_group_info info;
		bool slot = false;
		int i, count = 0, ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.flags)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/*
		 * We can't let userspace give us an arbitrarily large
		 * buffer to copy, so verify how many we think there
		 * could be.  Note groups can have multiple devices so
		 * one group per device is the max.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &count, slot);
		if (ret)
			return ret;

		/* Somewhere between 1 and count is OK */
		if (!hdr.count || hdr.count > count)
			return -EINVAL;

		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
		if (!group_fds || !groups) {
			kfree(group_fds);
			kfree(groups);
			return -ENOMEM;
		}

		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
				   hdr.count * sizeof(*group_fds))) {
			kfree(group_fds);
			kfree(groups);
			return -EFAULT;
		}

		/*
		 * For each group_fd, get the group through the vfio external
		 * user interface and store the group and iommu ID.  This
		 * ensures the group is held across the reset.
		 */
		for (i = 0; i < hdr.count; i++) {
			struct vfio_group *group;
			struct fd f = fdget(group_fds[i]);
			if (!f.file) {
				ret = -EBADF;
				break;
			}

			group = vfio_group_get_external_user(f.file);
			fdput(f);
			if (IS_ERR(group)) {
				ret = PTR_ERR(group);
				break;
			}

			groups[i].group = group;
			groups[i].id = vfio_external_user_iommu_id(group);
		}

		kfree(group_fds);

		/* release reference to groups on error */
		if (ret)
			goto hot_reset_release;

		info.count = hdr.count;
		info.groups = groups;

		/*
		 * Test whether all the affected devices are contained
		 * by the set of groups provided by the user.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_validate_devs,
						    &info, slot);
		if (!ret)
			/* User has access, do the reset */
			ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
				     pci_try_reset_bus(vdev->pdev->bus);

hot_reset_release:
		for (i--; i >= 0; i--)
			vfio_group_put_external_user(groups[i].group);

		kfree(groups);
		return ret;
	}

	return -ENOTTY;
}
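/*
 * The region index is encoded in the upper bits of the file offset (see the
 * VFIO_PCI_OFFSET_TO_INDEX/VFIO_PCI_OFFSET_SHIFT macros in the private
 * header), so a single read/write entry point can demultiplex all regions.
 */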
static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_device *vdev = device_data;

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			return -EINVAL;
		return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_VGA_REGION_INDEX:
		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);

	default:
		index -= VFIO_PCI_NUM_REGIONS;
		return vdev->region[index].ops->rw(vdev, buf,
						   count, ppos, iswrite);
	}

	return -EINVAL;
}
static ssize_t vfio_pci_read(void *device_data, char __user *buf,
			     size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, buf, count, ppos, false);
}

static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}
static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!vdev->bar_mmap_supported[index])
		return -EINVAL;

	phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (req_start + req_len > phys_len)
		return -EINVAL;

	if (index == vdev->msix_bar) {
		/*
		 * Disallow mmaps overlapping the MSI-X table; users don't
		 * get to touch this directly.  We could find somewhere
		 * else to map the overlap, but page granularity is only
		 * a recommendation, not a requirement, so the user needs
		 * to know which bits are real.  Requiring them to mmap
		 * around the table makes that clear.
		 */

		/* If neither entirely above nor below, then it overlaps */
		if (!(req_start >= vdev->msix_offset + vdev->msix_size ||
		      req_start + req_len <= vdev->msix_offset))
			return -EINVAL;
	}

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
		if (!vdev->barmap[index]) {
			pci_release_selected_regions(pdev, 1 << index);
			return -ENOMEM;
		}
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}
static void vfio_pci_request(void *device_data, unsigned int count)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		if (!(count % 10))
			dev_notice_ratelimited(&vdev->pdev->dev,
				"Relaying device request to user (#%u)\n",
				count);
		eventfd_signal(vdev->req_trigger, 1);
	} else if (count == 0) {
		dev_warn(&vdev->pdev->dev,
			"No device request channel registered, blocked until released by user\n");
	}

	mutex_unlock(&vdev->igate);
}
static const struct vfio_device_ops vfio_pci_ops = {
	.name		= "vfio-pci",
	.open		= vfio_pci_open,
	.release	= vfio_pci_release,
	.ioctl		= vfio_pci_ioctl,
	.read		= vfio_pci_read,
	.write		= vfio_pci_write,
	.mmap		= vfio_pci_mmap,
	.request	= vfio_pci_request,
};
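/*
 * Only devices with a normal (type 0) config header are accepted below;
 * bridges and CardBus devices use different header layouts and are not
 * handled by this driver.
 */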
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_device *vdev;
	struct iommu_group *group;
	int ret;

	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	group = vfio_iommu_group_get(&pdev->dev);
	if (!group)
		return -EINVAL;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		vfio_iommu_group_put(group, &pdev->dev);
		return -ENOMEM;
	}

	vdev->pdev = pdev;
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);

	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
	if (ret) {
		vfio_iommu_group_put(group, &pdev->dev);
		kfree(vdev);
		return ret;
	}

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
		vga_set_legacy_decoding(pdev,
					vfio_pci_set_vga_decode(vdev, false));
	}

	if (!disable_idle_d3) {
		/*
		 * pci-core sets the device power state to an unknown value at
		 * bootup and after being removed from a driver.  The only
		 * transition it allows from this unknown state is to D0, which
		 * typically happens when a driver calls pci_enable_device().
		 * We're not ready to enable the device yet, but we do want to
		 * be able to get to D3.  Therefore first do a D0 transition
		 * before going to D3.
		 */
		pci_set_power_state(pdev, PCI_D0);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return ret;
}
static void vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_device *vdev;

	vdev = vfio_del_group_dev(&pdev->dev);
	if (!vdev)
		return;

	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
	kfree(vdev->region);
	kfree(vdev);

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, NULL, NULL, NULL);
		vga_set_legacy_decoding(pdev,
				VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	}

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D0);
}
static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct vfio_pci_device *vdev;
	struct vfio_device *device;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (device == NULL)
		return PCI_ERS_RESULT_DISCONNECT;

	vdev = vfio_device_data(device);
	if (vdev == NULL) {
		vfio_device_put(device);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	vfio_device_put(device);

	return PCI_ERS_RESULT_CAN_RECOVER;
}
static const struct pci_error_handlers vfio_err_handlers = {
	.error_detected = vfio_pci_aer_err_detected,
};

static struct pci_driver vfio_pci_driver = {
	.name		= "vfio-pci",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vfio_pci_probe,
	.remove		= vfio_pci_remove,
	.err_handler	= &vfio_err_handlers,
};
struct vfio_devices {
	struct vfio_device **devices;
	int cur_index;
	int max_index;
};

static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = device;
	return 0;
}
/*
 * Attempt to do a bus/slot reset if there are devices affected by a reset for
 * this device that are needs_reset and all of the affected devices are unused
 * (!refcnt).  Callers are required to hold driver_lock when calling this to
 * prevent device opens and concurrent bus reset attempts.  We prevent device
 * unbinds by acquiring and holding a reference to the vfio_device.
 *
 * NB: vfio-core considers a group to be viable even if some devices are
 * bound to drivers like pci-stub or pcieport.  Here we require all devices
 * to be bound to vfio_pci since that's the only way we can be sure they
 * stay put.
 */
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
	struct vfio_devices devs = { .cur_index = 0 };
	int i = 0, ret = -EINVAL;
	bool needs_reset = false, slot = false;
	struct vfio_pci_device *tmp;

	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					  &i, slot) || !i)
		return;

	devs.max_index = i;
	devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
	if (!devs.devices)
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
					  vfio_pci_get_devs, &devs, slot))
		goto put_devs;

	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (tmp->needs_reset)
			needs_reset = true;
		if (tmp->refcnt)
			goto put_devs;
	}

	if (needs_reset)
		ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
			     pci_try_reset_bus(vdev->pdev->bus);

put_devs:
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (!ret)
			tmp->needs_reset = false;

		if (!tmp->refcnt && !disable_idle_d3)
			pci_set_power_state(tmp->pdev, PCI_D3hot);

		vfio_device_put(devs.devices[i]);
	}

	kfree(devs.devices);
}
static void __exit vfio_pci_cleanup(void)
{
	pci_unregister_driver(&vfio_pci_driver);
	vfio_pci_uninit_perm_bits();
}
static void __init vfio_pci_fill_ids(void)
{
	char *p, *id;
	int rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			pr_warn("invalid id string \"%s\"\n", id);
			continue;
		}

		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
				   subvendor, subdevice, class, class_mask, 0);
		if (rc)
			pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
				vendor, device, subvendor, subdevice,
				class, class_mask, rc);
		else
			pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
				vendor, device, subvendor, subdevice,
				class, class_mask);
	}
}
static int __init vfio_pci_init(void)
{
	int ret;

	/* Allocate shared config space permission data used by all devices */
	ret = vfio_pci_init_perm_bits();
	if (ret)
		return ret;

	/* Register and scan for devices */
	ret = pci_register_driver(&vfio_pci_driver);
	if (ret)
		goto out_driver;

	vfio_pci_fill_ids();

	return 0;

out_driver:
	vfio_pci_uninit_perm_bits();
	return ret;
}

module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);