// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>

#include "vfio_pci_private.h"

#define DRIVER_VERSION	"0.2"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO PCI - User Level meta-driver"

static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");

static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
		 "Disable support for PCI 2.3 style INTx masking.  If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");

#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif

static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
		 "Disable using the PCI D3 low power state for idle, unused devices");

static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
	return disable_vga;
#else
	return true;
#endif
}

/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself.  However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
	unsigned char max_busnr;
	unsigned int decodes;

	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}

static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
	return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}

static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
	struct resource *res;
	int i;
	struct vfio_pci_dummy_resource *dummy_res;

	INIT_LIST_HEAD(&vdev->dummy_resources_list);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		int bar = i + PCI_STD_RESOURCES;

		res = &vdev->pdev->resource[bar];

		if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
			goto no_mmap;

		if (!(res->flags & IORESOURCE_MEM))
			goto no_mmap;

		/*
		 * The PCI core shouldn't set up a resource with a
		 * type but zero size. But there may be bugs that
		 * cause us to do that.
		 */
		if (!resource_size(res))
			goto no_mmap;

		if (resource_size(res) >= PAGE_SIZE) {
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}

		if (!(res->start & ~PAGE_MASK)) {
			/*
			 * Add a dummy resource to reserve the remainder
			 * of the exclusive page in case that hot-add
			 * device's bar is assigned into it.
			 */
			dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
			if (dummy_res == NULL)
				goto no_mmap;

			dummy_res->resource.name = "vfio sub-page reserved";
			dummy_res->resource.start = res->end + 1;
			dummy_res->resource.end = res->start + PAGE_SIZE - 1;
			dummy_res->resource.flags = res->flags;
			if (request_resource(res->parent,
						&dummy_res->resource)) {
				kfree(dummy_res);
				goto no_mmap;
			}
			dummy_res->index = bar;
			list_add(&dummy_res->res_next,
					&vdev->dummy_resources_list);
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}
		/*
		 * Here we don't handle the case when the BAR is not page
		 * aligned because we can't expect the BAR will be
		 * assigned into the same location in a page in guest
		 * when we passthrough the BAR. And it's hard to access
		 * this BAR in userspace because we have no way to get
		 * the BAR's location in a page.
		 */
no_mmap:
		vdev->bar_mmap_supported[bar] = false;
	}
}

static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);

/*
 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
 * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
 * If a device implements the former but not the latter we would typically
 * expect broken_intx_masking to be set and require an exclusive interrupt.
 * However since we do have control of the device's ability to assert INTx,
 * we can instead pretend that the device does not implement INTx, virtualizing
 * the pin register to report zero and maintaining DisINTx set on the host.
 */
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		/* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
		case 0x1580 ... 0x1581:
		case 0x1583 ... 0x158b:
		case 0x37d0 ... 0x37d2:
			return true;
		default:
			return false;
		}
	}

	return false;
}

static void vfio_pci_probe_power_state(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	u16 pmcsr;

	if (!pdev->pm_cap)
		return;

	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);

	vdev->needs_pm_restore = !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
}

/*
 * pci_set_power_state() wrapper handling devices which perform a soft reset on
 * D3->D0 transition.  Save state prior to D0/1/2->D3, stash it on the vdev,
 * restore when returned to D0.  Saved separately from pci_saved_state for use
 * by PM capability emulation and separately from pci_dev internal saved state
 * to avoid it being overwritten and consumed around other resets.
 */
int vfio_pci_set_power_state(struct vfio_pci_device *vdev, pci_power_t state)
{
	struct pci_dev *pdev = vdev->pdev;
	bool needs_restore = false, needs_save = false;
	int ret;

	if (vdev->needs_pm_restore) {
		if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) {
			pci_save_state(pdev);
			needs_save = true;
		}

		if (pdev->current_state >= PCI_D3hot && state <= PCI_D0)
			needs_restore = true;
	}

	ret = pci_set_power_state(pdev, state);

	if (!ret) {
		/* D3 might be unsupported via quirk, skip unless in D3 */
		if (needs_save && pdev->current_state >= PCI_D3hot) {
			vdev->pm_save = pci_store_saved_state(pdev);
		} else if (needs_restore) {
			pci_load_and_free_saved_state(pdev, &vdev->pm_save);
			pci_restore_state(pdev);
		}
	}

	return ret;
}

static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	vfio_pci_set_power_state(vdev, PCI_D0);

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* If reset fails because of the device lock, fail this path entirely */
	ret = pci_try_reset_function(pdev);
	if (ret == -EAGAIN) {
		pci_disable_device(pdev);
		return ret;
	}

	vdev->reset_works = !ret;
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pci_dbg(pdev, "%s: Couldn't store saved state\n", __func__);

	if (likely(!nointxmask)) {
		if (vfio_pci_nointx(pdev)) {
			pci_info(pdev, "Masking broken INTx support\n");
			vdev->nointx = true;
			pci_intx(pdev, 0);
		} else
			vdev->pci_2_3 = pci_intx_mask_supported(pdev);
	}

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	ret = vfio_config_init(vdev);
	if (ret) {
		kfree(vdev->pci_saved_state);
		vdev->pci_saved_state = NULL;
		pci_disable_device(pdev);
		return ret;
	}

	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
	} else
		vdev->msix_bar = 0xFF;

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	if (vfio_pci_is_vga(pdev) &&
	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
		ret = vfio_pci_igd_init(vdev);
		if (ret) {
			pci_warn(pdev, "Failed to setup Intel IGD regions\n");
			goto disable_exit;
		}
	}

	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
	    IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
		ret = vfio_pci_nvdia_v100_nvlink2_init(vdev);
		if (ret && ret != -ENODEV) {
			pci_warn(pdev, "Failed to setup NVIDIA NV2 RAM region\n");
			goto disable_exit;
		}
	}

	if (pdev->vendor == PCI_VENDOR_ID_IBM &&
	    IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
		ret = vfio_pci_ibm_npu2_init(vdev);
		if (ret && ret != -ENODEV) {
			pci_warn(pdev, "Failed to setup NVIDIA NV2 ATSD region\n");
			goto disable_exit;
		}
	}

	vfio_pci_probe_mmaps(vdev);

	return 0;

disable_exit:
	vfio_pci_disable(vdev);
	return ret;
}

static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_dummy_resource *dummy_res, *tmp;
	struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp;
	int i, bar;

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	/* Device closed, don't need mutex here */
	list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
				 &vdev->ioeventfds_list, next) {
		vfio_virqfd_disable(&ioeventfd->virqfd);
		list_del(&ioeventfd->next);
		kfree(ioeventfd);
	}
	vdev->ioeventfds_nr = 0;

	vdev->virq_disabled = false;

	for (i = 0; i < vdev->num_regions; i++)
		vdev->region[i].ops->release(vdev, &vdev->region[i]);

	vdev->num_regions = 0;
	kfree(vdev->region);
	vdev->region = NULL; /* don't krealloc a freed pointer */

	vfio_config_free(vdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		bar = i + PCI_STD_RESOURCES;
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	list_for_each_entry_safe(dummy_res, tmp,
				 &vdev->dummy_resources_list, res_next) {
		list_del(&dummy_res->res_next);
		release_resource(&dummy_res->resource);
		kfree(dummy_res);
	}

	vdev->needs_reset = true;

	/*
	 * If we have saved state, restore it.  If we can reset the device,
	 * even better.  Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pci_info(pdev, "%s: Couldn't reload saved state\n", __func__);

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset.  Stolen from pci_reset_function()
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to get the locks ourselves to prevent a deadlock. The
	 * success of this is dependent on being able to lock the device,
	 * which is not always possible.
	 * We can not use the "try" reset interface here, which will
	 * overwrite the previously restored configuration information.
	 */
	if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
		if (device_trylock(&pdev->dev)) {
			if (!__pci_reset_function_locked(pdev))
				vdev->needs_reset = false;
			device_unlock(&pdev->dev);
		}
		pci_cfg_access_unlock(pdev);
	}

	pci_restore_state(pdev);
out:
	pci_disable_device(pdev);

	vfio_pci_try_bus_reset(vdev);

	if (!disable_idle_d3)
		vfio_pci_set_power_state(vdev, PCI_D3hot);
}

static void vfio_pci_release(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&vdev->reflck->lock);

	if (!(--vdev->refcnt)) {
		vfio_spapr_pci_eeh_release(vdev->pdev);
		vfio_pci_disable(vdev);
	}

	mutex_unlock(&vdev->reflck->lock);

	module_put(THIS_MODULE);
}

static int vfio_pci_open(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&vdev->reflck->lock);

	if (!vdev->refcnt) {
		ret = vfio_pci_enable(vdev);
		if (ret)
			goto error;

		vfio_spapr_pci_eeh_open(vdev->pdev);
	}
	vdev->refcnt++;
error:
	mutex_unlock(&vdev->reflck->lock);
	if (ret)
		module_put(THIS_MODULE);
	return ret;
}

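/*
 * Report how many interrupts an index can provide, derived directly from
 * config space: INTx is 0 or 1 based on the interrupt pin register, MSI is
 * 2^(Multiple Message Capable) from the MSI flags, MSI-X is the table size
 * (QSIZE + 1), and the error/request indexes each expose a single vector.
 */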
static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
		    vdev->nointx || vdev->pdev->is_virtfn)
			return 0;

		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);

		return pin ? 1 : 0;
	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);

			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}

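/* pci_walk_bus() callback: count the devices in a bus/slot reset group */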
static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}

struct vfio_pci_fill_info {
	int max;
	int cur;
	struct vfio_pci_dependent_device *devices;
};

static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_fill_info *fill = data;
	struct iommu_group *iommu_group;

	if (fill->cur == fill->max)
		return -EAGAIN; /* Something changed, try again */

	iommu_group = iommu_group_get(&pdev->dev);
	if (!iommu_group)
		return -EPERM; /* Cannot reset non-isolated devices */

	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
	fill->devices[fill->cur].bus = pdev->bus->number;
	fill->devices[fill->cur].devfn = pdev->devfn;
	fill->cur++;
	iommu_group_put(iommu_group);
	return 0;
}

struct vfio_pci_group_entry {
	struct vfio_group *group;
	int id;
};

struct vfio_pci_group_info {
	int count;
	struct vfio_pci_group_entry *groups;
};

static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_group_info *info = data;
	struct iommu_group *group;
	int id, i;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EPERM;

	id = iommu_group_id(group);

	for (i = 0; i < info->count; i++)
		if (info->groups[i].id == id)
			break;

	iommu_group_put(group);

	return (i == info->count) ? -EINVAL : 0;
}

static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	for (; pdev; pdev = pdev->bus->self)
		if (pdev->bus == slot->bus)
			return (pdev->slot == slot);
	return false;
}

struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *, void *data);
	void *data;
	struct pci_dev *pdev;
	bool slot;
	int ret;
};

static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		walk->ret = walk->fn(pdev, walk->data);

	return walk->ret;
}

static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk = {
		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
	};

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}

static int msix_mmappable_cap(struct vfio_pci_device *vdev,
			      struct vfio_info_cap *caps)
{
	struct vfio_info_cap_header header = {
		.id = VFIO_REGION_INFO_CAP_MSIX_MAPPABLE,
		.version = 1
	};

	return vfio_info_add_capability(caps, &header, sizeof(header));
}

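/*
 * Append a device-specific region to vdev->region[].  These regions are
 * reported to userspace after the fixed VFIO_PCI_NUM_REGIONS indexes and
 * are accessed through the ops provided by the caller.
 */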
int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
				 unsigned int type, unsigned int subtype,
				 const struct vfio_pci_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_pci_region *region;

	region = krealloc(vdev->region,
			  (vdev->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vdev->region = region;
	vdev->region[vdev->num_regions].type = type;
	vdev->region[vdev->num_regions].subtype = subtype;
	vdev->region[vdev->num_regions].ops = ops;
	vdev->region[vdev->num_regions].size = size;
	vdev->region[vdev->num_regions].flags = flags;
	vdev->region[vdev->num_regions].data = data;

	vdev->num_regions++;

	return 0;
}

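/*
 * Region offsets returned by VFIO_DEVICE_GET_REGION_INFO encode the region
 * index in the upper bits of the offset (VFIO_PCI_INDEX_TO_OFFSET), which
 * is how the read/write/mmap entry points below recover the region being
 * accessed.
 */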
static long vfio_pci_ioctl(void *device_data,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_pci_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;

		if (vdev->reset_works)
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pdev->cfg_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			if (vdev->bar_mmap_supported[info.index]) {
				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
				if (info.index == vdev->msix_bar) {
					ret = msix_mmappable_cap(vdev, &caps);
					if (ret)
						return ret;
				}
			}

			break;
		case VFIO_PCI_ROM_REGION_INDEX:
		{
			void __iomem *io;
			size_t size;
			u16 orig_cmd;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = 0;

			/* Report the BAR size, not the ROM size */
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				/* Shadow ROMs appear as PCI option ROMs */
				if (pdev->resource[PCI_ROM_RESOURCE].flags &
							IORESOURCE_ROM_SHADOW)
					info.size = 0x20000;
				else
					break;
			}

			/*
			 * Is it really there?  Enable memory decode for
			 * implicit access in pci_map_rom().
			 */
			pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
			pci_write_config_word(pdev, PCI_COMMAND,
					      orig_cmd | PCI_COMMAND_MEMORY);

			io = pci_map_rom(pdev, &size);
			if (io) {
				info.flags = VFIO_REGION_INFO_FLAG_READ;
				pci_unmap_rom(pdev, io);
			} else {
				info.size = 0;
			}

			pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
			break;
		}
		case VFIO_PCI_VGA_REGION_INDEX:
			if (!vdev->has_vga)
				return -EINVAL;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0xc0000;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

			break;
		default:
		{
			struct vfio_region_info_cap_type cap_type = {
					.header.id = VFIO_REGION_INFO_CAP_TYPE,
					.header.version = 1 };

			if (info.index >=
			    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
				return -EINVAL;
			info.index = array_index_nospec(info.index,
							VFIO_PCI_NUM_REGIONS +
							vdev->num_regions);

			i = info.index - VFIO_PCI_NUM_REGIONS;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vdev->region[i].size;
			info.flags = vdev->region[i].flags;

			cap_type.type = vdev->region[i].type;
			cap_type.subtype = vdev->region[i].subtype;

			ret = vfio_info_add_capability(&caps, &cap_type.header,
						       sizeof(cap_type));
			if (ret)
				return ret;

			if (vdev->region[i].ops->add_capability) {
				ret = vdev->region[i].ops->add_capability(vdev,
						&vdev->region[i], &caps);
				if (ret)
					return ret;
			}
		}
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
		case VFIO_PCI_REQ_IRQ_INDEX:
			break;
		case VFIO_PCI_ERR_IRQ_INDEX:
			if (pci_is_pcie(vdev->pdev))
				break;
		/* fall through */
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = vfio_pci_get_irq_count(vdev, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int max, ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		max = vfio_pci_get_irq_count(vdev, hdr.index);

		ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
					      hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		return vdev->reset_works ?
			pci_try_reset_function(vdev->pdev) : -EINVAL;

	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
		struct vfio_pci_hot_reset_info hdr;
		struct vfio_pci_fill_info fill = { 0 };
		struct vfio_pci_dependent_device *devices = NULL;
		bool slot = false;
		int ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		hdr.flags = 0;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/* How many devices are affected? */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &fill.max, slot);
		if (ret)
			return ret;

		WARN_ON(!fill.max); /* Should always be at least one */

		/*
		 * If there's enough space, fill it now, otherwise return
		 * -ENOSPC and the number of devices affected.
		 */
		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
			ret = -ENOSPC;
			hdr.count = fill.max;
			goto reset_info_exit;
		}

		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
		if (!devices)
			return -ENOMEM;

		fill.devices = devices;

		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_fill_devs,
						    &fill, slot);

		/*
		 * If a device was removed between counting and filling,
		 * we may come up short of fill.max.  If a device was
		 * added, we'll have a return of -EAGAIN above.
		 */
		if (!ret)
			hdr.count = fill.cur;

reset_info_exit:
		if (copy_to_user((void __user *)arg, &hdr, minsz))
			ret = -EFAULT;

		if (!ret) {
			if (copy_to_user((void __user *)(arg + minsz), devices,
					 hdr.count * sizeof(*devices)))
				ret = -EFAULT;
		}

		kfree(devices);
		return ret;

	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
		struct vfio_pci_hot_reset hdr;
		int32_t *group_fds;
		struct vfio_pci_group_entry *groups;
		struct vfio_pci_group_info info;
		bool slot = false;
		int i, count = 0, ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.flags)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/*
		 * We can't let userspace give us an arbitrarily large
		 * buffer to copy, so verify how many we think there
		 * could be.  Note groups can have multiple devices so
		 * one group per device is the max.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &count, slot);
		if (ret)
			return ret;

		/* Somewhere between 1 and count is OK */
		if (!hdr.count || hdr.count > count)
			return -EINVAL;

		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
		if (!group_fds || !groups) {
			kfree(group_fds);
			kfree(groups);
			return -ENOMEM;
		}

		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
				   hdr.count * sizeof(*group_fds))) {
			kfree(group_fds);
			kfree(groups);
			return -EFAULT;
		}

		/*
		 * For each group_fd, get the group through the vfio external
		 * user interface and store the group and iommu ID.  This
		 * ensures the group is held across the reset.
		 */
		for (i = 0; i < hdr.count; i++) {
			struct vfio_group *group;
			struct fd f = fdget(group_fds[i]);
			if (!f.file) {
				ret = -EBADF;
				break;
			}

			group = vfio_group_get_external_user(f.file);
			fdput(f);
			if (IS_ERR(group)) {
				ret = PTR_ERR(group);
				break;
			}

			groups[i].group = group;
			groups[i].id = vfio_external_user_iommu_id(group);
		}

		kfree(group_fds);

		/* release reference to groups on error */
		if (ret)
			goto hot_reset_release;

		info.count = hdr.count;
		info.groups = groups;

		/*
		 * Test whether all the affected devices are contained
		 * by the set of groups provided by the user.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_validate_devs,
						    &info, slot);
		if (!ret)
			/* User has access, do the reset */
			ret = pci_reset_bus(vdev->pdev);

hot_reset_release:
		for (i--; i >= 0; i--)
			vfio_group_put_external_user(groups[i].group);

		kfree(groups);
		return ret;
	} else if (cmd == VFIO_DEVICE_IOEVENTFD) {
		struct vfio_device_ioeventfd ioeventfd;
		int count;

		minsz = offsetofend(struct vfio_device_ioeventfd, fd);

		if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
			return -EFAULT;

		if (ioeventfd.argsz < minsz)
			return -EINVAL;

		if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
			return -EINVAL;

		count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;

		if (hweight8(count) != 1 || ioeventfd.fd < -1)
			return -EINVAL;

		return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
					  ioeventfd.data, count, ioeventfd.fd);
	}

	return -ENOTTY;
}

static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_device *vdev = device_data;

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			return -EINVAL;
		return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_VGA_REGION_INDEX:
		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
	default:
		index -= VFIO_PCI_NUM_REGIONS;
		return vdev->region[index].ops->rw(vdev, buf,
						   count, ppos, iswrite);
	}

	return -EINVAL;
}

static ssize_t vfio_pci_read(void *device_data, char __user *buf,
			     size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, buf, count, ppos, false);
}

static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}

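/*
 * mmap only services full-page BAR regions; the region index is recovered
 * from vm_pgoff and the mapping is redirected to the BAR's physical
 * address.  Device-specific regions may provide their own mmap handler
 * through their ops.
 */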
static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_NUM_REGIONS) {
		int regnum = index - VFIO_PCI_NUM_REGIONS;
		struct vfio_pci_region *region = vdev->region + regnum;

		if (region && region->ops && region->ops->mmap &&
		    (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
			return region->ops->mmap(vdev, region, vma);
		return -EINVAL;
	}
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!vdev->bar_mmap_supported[index])
		return -EINVAL;

	phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (req_start + req_len > phys_len)
		return -EINVAL;

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
		if (!vdev->barmap[index]) {
			pci_release_selected_regions(pdev, 1 << index);
			return -ENOMEM;
		}
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}

static void vfio_pci_request(void *device_data, unsigned int count)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		if (!(count % 10))
			pci_notice_ratelimited(pdev,
				"Relaying device request to user (#%u)\n",
				count);
		eventfd_signal(vdev->req_trigger, 1);
	} else if (count == 0) {
		pci_warn(pdev,
			"No device request channel registered, blocked until released by user\n");
	}

	mutex_unlock(&vdev->igate);
}

static const struct vfio_device_ops vfio_pci_ops = {
	.name		= "vfio-pci",
	.open		= vfio_pci_open,
	.release	= vfio_pci_release,
	.ioctl		= vfio_pci_ioctl,
	.read		= vfio_pci_read,
	.write		= vfio_pci_write,
	.mmap		= vfio_pci_mmap,
	.request	= vfio_pci_request,
};

static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev);
static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck);

static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_device *vdev;
	struct iommu_group *group;
	int ret;

	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	/*
	 * Prevent binding to PFs with VFs enabled, this too easily allows
	 * userspace instance with VFs and PFs from the same device, which
	 * cannot work.  Disabling SR-IOV here would initiate removing the
	 * VFs, which would unbind the driver, which is prone to blocking
	 * if that VF is also in use by vfio-pci.  Just reject these PFs
	 * and let the user sort it out.
	 */
	if (pci_num_vf(pdev)) {
		pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
		return -EBUSY;
	}

	group = vfio_iommu_group_get(&pdev->dev);
	if (!group)
		return -EINVAL;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		vfio_iommu_group_put(group, &pdev->dev);
		return -ENOMEM;
	}

	vdev->pdev = pdev;
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);
	mutex_init(&vdev->ioeventfds_lock);
	INIT_LIST_HEAD(&vdev->ioeventfds_list);

	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
	if (ret) {
		vfio_iommu_group_put(group, &pdev->dev);
		kfree(vdev);
		return ret;
	}

	ret = vfio_pci_reflck_attach(vdev);
	if (ret) {
		vfio_del_group_dev(&pdev->dev);
		vfio_iommu_group_put(group, &pdev->dev);
		kfree(vdev);
		return ret;
	}

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
		vga_set_legacy_decoding(pdev,
					vfio_pci_set_vga_decode(vdev, false));
	}

	vfio_pci_probe_power_state(vdev);

	if (!disable_idle_d3) {
		/*
		 * pci-core sets the device power state to an unknown value at
		 * bootup and after being removed from a driver.  The only
		 * transition it allows from this unknown state is to D0, which
		 * typically happens when a driver calls pci_enable_device().
		 * We're not ready to enable the device yet, but we do want to
		 * be able to get to D3.  Therefore first do a D0 transition
		 * before going to D3.
		 */
		vfio_pci_set_power_state(vdev, PCI_D0);
		vfio_pci_set_power_state(vdev, PCI_D3hot);
	}

	return ret;
}

static void vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_device *vdev;

	vdev = vfio_del_group_dev(&pdev->dev);
	if (!vdev)
		return;

	vfio_pci_reflck_put(vdev->reflck);

	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
	kfree(vdev->region);
	mutex_destroy(&vdev->ioeventfds_lock);

	if (!disable_idle_d3)
		vfio_pci_set_power_state(vdev, PCI_D0);

	kfree(vdev->pm_save);
	kfree(vdev);

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, NULL, NULL, NULL);
		vga_set_legacy_decoding(pdev,
				VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	}
}

static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct vfio_pci_device *vdev;
	struct vfio_device *device;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (device == NULL)
		return PCI_ERS_RESULT_DISCONNECT;

	vdev = vfio_device_data(device);
	if (vdev == NULL) {
		vfio_device_put(device);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	vfio_device_put(device);

	return PCI_ERS_RESULT_CAN_RECOVER;
}

static const struct pci_error_handlers vfio_err_handlers = {
	.error_detected = vfio_pci_aer_err_detected,
};

static struct pci_driver vfio_pci_driver = {
	.name		= "vfio-pci",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vfio_pci_probe,
	.remove		= vfio_pci_remove,
	.err_handler	= &vfio_err_handlers,
};

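/*
 * The reflck is a kref'd mutex shared by every vfio-pci device in the same
 * bus/slot reset group.  It serializes open/release against the bus reset
 * attempted in vfio_pci_try_bus_reset().
 */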
static DEFINE_MUTEX(reflck_lock);

static struct vfio_pci_reflck *vfio_pci_reflck_alloc(void)
{
	struct vfio_pci_reflck *reflck;

	reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
	if (!reflck)
		return ERR_PTR(-ENOMEM);

	kref_init(&reflck->kref);
	mutex_init(&reflck->lock);

	return reflck;
}

static void vfio_pci_reflck_get(struct vfio_pci_reflck *reflck)
{
	kref_get(&reflck->kref);
}

static int vfio_pci_reflck_find(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_reflck **preflck = data;
	struct vfio_device *device;
	struct vfio_pci_device *vdev;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return 0;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return 0;
	}

	vdev = vfio_device_data(device);

	if (vdev->reflck) {
		vfio_pci_reflck_get(vdev->reflck);
		*preflck = vdev->reflck;
		vfio_device_put(device);
		return 1;
	}

	vfio_device_put(device);
	return 0;
}

static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev)
{
	bool slot = !pci_probe_reset_slot(vdev->pdev->slot);

	mutex_lock(&reflck_lock);

	if (pci_is_root_bus(vdev->pdev->bus) ||
	    vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_reflck_find,
					  &vdev->reflck, slot) <= 0)
		vdev->reflck = vfio_pci_reflck_alloc();

	mutex_unlock(&reflck_lock);

	return PTR_ERR_OR_ZERO(vdev->reflck);
}

static void vfio_pci_reflck_release(struct kref *kref)
{
	struct vfio_pci_reflck *reflck = container_of(kref,
						      struct vfio_pci_reflck,
						      kref);

	kfree(reflck);
	mutex_unlock(&reflck_lock);
}

static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck)
{
	kref_put_mutex(&reflck->kref, vfio_pci_reflck_release, &reflck_lock);
}

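/*
 * Helpers for collecting references to every vfio_device affected by a
 * bus/slot reset so those devices can't be unbound while the reset is
 * attempted.
 */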
struct vfio_devices {
	struct vfio_device **devices;
	int cur_index;
	int max_index;
};

static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;
	struct vfio_pci_device *vdev;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	vdev = vfio_device_data(device);

	/* Fault if the device is not unused */
	if (vdev->refcnt) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = device;
	return 0;
}

/*
 * If a bus or slot reset is available for the provided device and:
 *  - All of the devices affected by that bus or slot reset are unused
 *    (!refcnt)
 *  - At least one of the affected devices is marked dirty via
 *    needs_reset (such as by lack of FLR support)
 * Then attempt to perform that bus or slot reset.  Callers are required
 * to hold vdev->reflck->lock, protecting the bus/slot reset group from
 * concurrent opens.  A vfio_device reference is acquired for each device
 * to prevent unbinds during the reset operation.
 *
 * NB: vfio-core considers a group to be viable even if some devices are
 * bound to drivers like pci-stub or pcieport.  Here we require all devices
 * to be bound to vfio_pci since that's the only way we can be sure they
 * stay put.
 */
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
	struct vfio_devices devs = { .cur_index = 0 };
	int i = 0, ret = -EINVAL;
	bool slot = false;
	struct vfio_pci_device *tmp;

	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					  &i, slot) || !i)
		return;

	devs.max_index = i;
	devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
	if (!devs.devices)
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
					  vfio_pci_get_unused_devs,
					  &devs, slot))
		goto put_devs;

	/* Does at least one need a reset? */
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (tmp->needs_reset) {
			ret = pci_reset_bus(vdev->pdev);
			break;
		}
	}

put_devs:
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);

		/*
		 * If reset was successful, affected devices no longer need
		 * a reset and we should return all the collateral devices
		 * to low power.  If not successful, we either didn't reset
		 * the bus or timed out waiting for it, so let's not touch
		 * the power state.
		 */
		if (!ret) {
			tmp->needs_reset = false;

			if (tmp != vdev && !disable_idle_d3)
				vfio_pci_set_power_state(tmp, PCI_D3hot);
		}

		vfio_device_put(devs.devices[i]);
	}

	kfree(devs.devices);
}

static void __exit vfio_pci_cleanup(void)
{
	pci_unregister_driver(&vfio_pci_driver);
	vfio_pci_uninit_perm_bits();
}

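/*
 * Parse the "ids" module parameter at load time and register each entry as
 * a dynamic PCI ID, equivalent to writing it to the driver's new_id node.
 */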
static void __init vfio_pci_fill_ids(void)
{
	char *p, *id;
	int rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			pr_warn("invalid id string \"%s\"\n", id);
			continue;
		}

		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
				   subvendor, subdevice, class, class_mask, 0);
		if (rc)
			pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
				vendor, device, subvendor, subdevice,
				class, class_mask, rc);
		else
			pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
				vendor, device, subvendor, subdevice,
				class, class_mask);
	}
}

static int __init vfio_pci_init(void)
{
	int ret;

	/* Allocate shared config space permission data used by all devices */
	ret = vfio_pci_init_perm_bits();
	if (ret)
		return ret;

	/* Register and scan for devices */
	ret = pci_register_driver(&vfio_pci_driver);
	if (ret)
		goto out_driver;

	vfio_pci_fill_ids();

	return 0;

out_driver:
	vfio_pci_uninit_perm_bits();
	return ret;
}

module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);