/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>

#include "vfio_pci_private.h"

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "VFIO PCI - User Level meta-driver"

static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");

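/*
 * Example (IDs below are illustrative only): to claim matching devices at
 * module load time one would pass something like
 *
 *	modprobe vfio-pci ids=1234:5678,1234:9abc
 *
 * The same string format is parsed by vfio_pci_fill_ids() at init time.
 */
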
static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
		  "Disable support for PCI 2.3 style INTx masking.  If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");

#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif

static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
		 "Disable using the PCI D3 low power state for idle, unused devices");

static DEFINE_MUTEX(driver_lock);

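/*
 * driver_lock serializes open/release (the per-device refcnt) against
 * vfio_pci_try_bus_reset(); see the comment above that function, which
 * requires callers to hold this lock so that device opens cannot race
 * with a bus or slot reset.
 */
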
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
	return disable_vga;
#else
	return true;
#endif
}

/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself.  However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
	unsigned char max_busnr;
	unsigned int decodes;

	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}

static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
	return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}

static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);

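/*
 * First-open path: wake the device to D0, reset it, stash a clean copy of
 * config space, and record the MSI-X and VGA properties that the rest of
 * the driver depends on.
 */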
static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	pci_set_power_state(pdev, PCI_D0);

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	vdev->reset_works = (pci_reset_function(pdev) == 0);
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pr_debug("%s: Couldn't store %s saved state\n",
			 __func__, dev_name(&pdev->dev));

	ret = vfio_config_init(vdev);
	if (ret) {
		kfree(vdev->pci_saved_state);
		vdev->pci_saved_state = NULL;
		pci_disable_device(pdev);
		return ret;
	}

	if (likely(!nointxmask))
		vdev->pci_2_3 = pci_intx_mask_supported(pdev);

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
	} else
		vdev->msix_bar = 0xFF;

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	return 0;
}

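/*
 * Last-close path: undo vfio_pci_enable().  User interrupts are torn down,
 * BAR mappings released, saved config space restored and the device reset
 * where possible, before the device is parked in D3hot (unless
 * disable_idle_d3 is set).
 */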
static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int bar;

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	vdev->virq_disabled = false;

	vfio_config_free(vdev);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	vdev->needs_reset = true;

	/*
	 * If we have saved state, restore it.  If we can reset the device,
	 * even better.  Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pr_info("%s: Couldn't reload %s saved state\n",
			__func__, dev_name(&pdev->dev));

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset.  Stolen from pci_reset_function()
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to reset the device.  The success of this is dependent on
	 * being able to lock the device, which is not always possible.
	 */
	if (vdev->reset_works && !pci_try_reset_function(pdev))
		vdev->needs_reset = false;

	pci_restore_state(pdev);
out:
	pci_disable_device(pdev);

	vfio_pci_try_bus_reset(vdev);

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D3hot);
}

static void vfio_pci_release(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		vfio_spapr_pci_eeh_release(vdev->pdev);
		vfio_pci_disable(vdev);
	}

	mutex_unlock(&driver_lock);

	module_put(THIS_MODULE);
}

static int vfio_pci_open(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_pci_enable(vdev);
		if (ret)
			goto error;

		vfio_spapr_pci_eeh_open(vdev->pdev);
	}
	vdev->refcnt++;
error:
	mutex_unlock(&driver_lock);
	if (ret)
		module_put(THIS_MODULE);
	return ret;
}

static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;

		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
		if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && pin)
			return 1;

	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);

			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}

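/*
 * The vector counts above follow the PCI spec encodings: the MSI Multiple
 * Message Capable field stores log2 of the vector count (a raw value of 3
 * means 1 << 3 = 8 vectors), while the MSI-X Table Size field is N-1 (a
 * raw value of 7 means 8 table entries).
 */
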
static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}

struct vfio_pci_fill_info {
	int max;
	int cur;
	struct vfio_pci_dependent_device *devices;
};

static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_fill_info *fill = data;
	struct iommu_group *iommu_group;

	if (fill->cur == fill->max)
		return -EAGAIN; /* Something changed, try again */

	iommu_group = iommu_group_get(&pdev->dev);
	if (!iommu_group)
		return -EPERM; /* Cannot reset non-isolated devices */

	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
	fill->devices[fill->cur].bus = pdev->bus->number;
	fill->devices[fill->cur].devfn = pdev->devfn;
	fill->cur++;
	iommu_group_put(iommu_group);
	return 0;
}

struct vfio_pci_group_entry {
	struct vfio_group *group;
	int id;
};

struct vfio_pci_group_info {
	int count;
	struct vfio_pci_group_entry *groups;
};

static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_group_info *info = data;
	struct iommu_group *group;
	int id, i;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EPERM;

	id = iommu_group_id(group);

	for (i = 0; i < info->count; i++)
		if (info->groups[i].id == id)
			break;

	iommu_group_put(group);

	return (i == info->count) ? -EINVAL : 0;
}

static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	for (; pdev; pdev = pdev->bus->self)
		if (pdev->bus == slot->bus)
			return (pdev->slot == slot);

	return false;
}

struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *, void *data);
	void *data;
	struct pci_dev *pdev;
	bool slot;
	int ret;
};

static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		walk->ret = walk->fn(pdev, walk->data);

	return walk->ret;
}

static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk = {
		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
	};

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}

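/*
 * The ioctls below implement the per-device half of the VFIO uAPI.  A
 * minimal userspace sketch (illustrative only, error handling omitted;
 * device_fd comes from VFIO_GROUP_GET_DEVICE_FD):
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *	ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info);
 *
 * info.num_regions and info.num_irqs then bound the region and IRQ
 * indexes that may be queried below.
 */
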
static long vfio_pci_ioctl(void *device_data,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_pci_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;

		if (vdev->reset_works)
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = VFIO_PCI_NUM_REGIONS;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pdev->cfg_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			if (IS_ENABLED(CONFIG_VFIO_PCI_MMAP) &&
			    pci_resource_flags(pdev, info.index) &
			    IORESOURCE_MEM && info.size >= PAGE_SIZE)
				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
			break;
		case VFIO_PCI_ROM_REGION_INDEX:
		{
			void __iomem *io;
			size_t size;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = 0;

			/* Report the BAR size, not the ROM size */
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size)
				break;

			/* Is it really there? */
			io = pci_map_rom(pdev, &size);
			if (!io || !size)
				break;
			pci_unmap_rom(pdev, io);

			info.flags = VFIO_REGION_INFO_FLAG_READ;
			break;
		}
		case VFIO_PCI_VGA_REGION_INDEX:
			if (!vdev->has_vga)
				return -EINVAL;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0xc0000;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

			break;
		default:
			return -EINVAL;
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
		case VFIO_PCI_REQ_IRQ_INDEX:
			break;
		case VFIO_PCI_ERR_IRQ_INDEX:
			if (pci_is_pcie(vdev->pdev))
				break;
		/* pass thru to return error */
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = vfio_pci_get_irq_count(vdev, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		size_t size;
		u8 *data = NULL;
		int max, ret = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
		    hdr.count >= (U32_MAX - hdr.start) ||
		    hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
			return -EINVAL;

		max = vfio_pci_get_irq_count(vdev, hdr.index);
		if (hdr.start >= max || hdr.start + hdr.count > max)
			return -EINVAL;

		switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
		case VFIO_IRQ_SET_DATA_NONE:
			size = 0;
			break;
		case VFIO_IRQ_SET_DATA_BOOL:
			size = sizeof(uint8_t);
			break;
		case VFIO_IRQ_SET_DATA_EVENTFD:
			size = sizeof(int32_t);
			break;
		default:
			return -EINVAL;
		}

		if (size) {
			if (hdr.argsz - minsz < hdr.count * size)
				return -EINVAL;

			data = memdup_user((void __user *)(arg + minsz),
					   hdr.count * size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
					      hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		return vdev->reset_works ?
			pci_try_reset_function(vdev->pdev) : -EINVAL;

	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
		struct vfio_pci_hot_reset_info hdr;
		struct vfio_pci_fill_info fill = { 0 };
		struct vfio_pci_dependent_device *devices = NULL;
		bool slot = false;
		int ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		hdr.flags = 0;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/* How many devices are affected? */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &fill.max, slot);
		if (ret)
			return ret;

		WARN_ON(!fill.max); /* Should always be at least one */

		/*
		 * If there's enough space, fill it now, otherwise return
		 * -ENOSPC and the number of devices affected.
		 */
		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
			ret = -ENOSPC;
			hdr.count = fill.max;
			goto reset_info_exit;
		}

		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
		if (!devices)
			return -ENOMEM;

		fill.devices = devices;

		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_fill_devs,
						    &fill, slot);

		/*
		 * If a device was removed between counting and filling,
		 * we may come up short of fill.max.  If a device was
		 * added, we'll have a return of -EAGAIN above.
		 */
		if (!ret)
			hdr.count = fill.cur;

reset_info_exit:
		if (copy_to_user((void __user *)arg, &hdr, minsz))
			ret = -EFAULT;

		if (!ret) {
			if (copy_to_user((void __user *)(arg + minsz), devices,
					 hdr.count * sizeof(*devices)))
				ret = -EFAULT;
		}

		kfree(devices);
		return ret;

	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
		struct vfio_pci_hot_reset hdr;
		int32_t *group_fds;
		struct vfio_pci_group_entry *groups;
		struct vfio_pci_group_info info;
		bool slot = false;
		int i, count = 0, ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.flags)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/*
		 * We can't let userspace give us an arbitrarily large
		 * buffer to copy, so verify how many we think there
		 * could be.  Note groups can have multiple devices so
		 * one group per device is the max.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &count, slot);
		if (ret)
			return ret;

		/* Somewhere between 1 and count is OK */
		if (!hdr.count || hdr.count > count)
			return -EINVAL;

		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
		if (!group_fds || !groups) {
			kfree(group_fds);
			kfree(groups);
			return -ENOMEM;
		}

		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
				   hdr.count * sizeof(*group_fds))) {
			kfree(group_fds);
			kfree(groups);
			return -EFAULT;
		}

		/*
		 * For each group_fd, get the group through the vfio external
		 * user interface and store the group and iommu ID.  This
		 * ensures the group is held across the reset.
		 */
		for (i = 0; i < hdr.count; i++) {
			struct vfio_group *group;
			struct fd f = fdget(group_fds[i]);
			if (!f.file) {
				ret = -EBADF;
				break;
			}

			group = vfio_group_get_external_user(f.file);
			fdput(f);
			if (IS_ERR(group)) {
				ret = PTR_ERR(group);
				break;
			}

			groups[i].group = group;
			groups[i].id = vfio_external_user_iommu_id(group);
		}

		kfree(group_fds);

		/* release reference to groups on error */
		if (ret)
			goto hot_reset_release;

		info.count = hdr.count;
		info.groups = groups;

		/*
		 * Test whether all the affected devices are contained
		 * by the set of groups provided by the user.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_validate_devs,
						    &info, slot);
		if (!ret)
			/* User has access, do the reset */
			ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
				     pci_try_reset_bus(vdev->pdev->bus);

hot_reset_release:
		for (i--; i >= 0; i--)
			vfio_group_put_external_user(groups[i].group);

		kfree(groups);
		return ret;
	}

	return -ENOTTY;
}

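/*
 * Note the two step flow userspace follows for bus/slot resets above:
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO reports every affected device and its
 * group, then VFIO_DEVICE_PCI_HOT_RESET is called with group fds proving
 * ownership of all of them before the reset is attempted.
 */
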
static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_device *vdev = device_data;

	if (index >= VFIO_PCI_NUM_REGIONS)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			return -EINVAL;
		return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_VGA_REGION_INDEX:
		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
	}

	return -EINVAL;
}

static ssize_t vfio_pci_read(void *device_data, char __user *buf,
			     size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, buf, count, ppos, false);
}

static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}

static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!(pci_resource_flags(pdev, index) & IORESOURCE_MEM))
		return -EINVAL;

	phys_len = pci_resource_len(pdev, index);
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (phys_len < PAGE_SIZE || req_start + req_len > phys_len)
		return -EINVAL;

	if (index == vdev->msix_bar) {
		/*
		 * Disallow mmaps overlapping the MSI-X table; users don't
		 * get to touch this directly.  We could find somewhere
		 * else to map the overlap, but page granularity is only
		 * a recommendation, not a requirement, so the user needs
		 * to know which bits are real.  Requiring them to mmap
		 * around the table makes that clear.
		 */

		/* If neither entirely above nor below, then it overlaps */
		if (!(req_start >= vdev->msix_offset + vdev->msix_size ||
		      req_start + req_len <= vdev->msix_offset))
			return -EINVAL;
	}

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
		if (!vdev->barmap[index]) {
			pci_release_selected_regions(pdev, 1 << index);
			return -ENOMEM;
		}
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}

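/*
 * The mmap offset is not a raw BAR offset: the region index lives in the
 * upper bits of the page offset (VFIO_PCI_OFFSET_SHIFT), which is why the
 * index and pgoff are unpacked from vm_pgoff above.  Userspace gets the
 * correct offset from the .offset field returned by
 * VFIO_DEVICE_GET_REGION_INFO rather than computing it by hand.
 */
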
static void vfio_pci_request(void *device_data, unsigned int count)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		if (!(count % 10))
			dev_notice_ratelimited(&vdev->pdev->dev,
				"Relaying device request to user (#%u)\n",
				count);
		eventfd_signal(vdev->req_trigger, 1);
	} else if (count == 0) {
		dev_warn(&vdev->pdev->dev,
			"No device request channel registered, blocked until released by user\n");
	}

	mutex_unlock(&vdev->igate);
}

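/*
 * The .request callback above is invoked by vfio-core when it wants the
 * device back (for instance on unbind while the device is still open); all
 * this driver can do is signal the user's request eventfd and warn when
 * nobody is listening.
 */
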
static const struct vfio_device_ops vfio_pci_ops = {
	.name		= "vfio-pci",
	.open		= vfio_pci_open,
	.release	= vfio_pci_release,
	.ioctl		= vfio_pci_ioctl,
	.read		= vfio_pci_read,
	.write		= vfio_pci_write,
	.mmap		= vfio_pci_mmap,
	.request	= vfio_pci_request,
};

static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_device *vdev;
	struct iommu_group *group;
	int ret;

	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EINVAL;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		iommu_group_put(group);
		return -ENOMEM;
	}

	vdev->pdev = pdev;
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);

	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
	if (ret) {
		iommu_group_put(group);
		kfree(vdev);
		return ret;
	}

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
		vga_set_legacy_decoding(pdev,
					vfio_pci_set_vga_decode(vdev, false));
	}

	if (!disable_idle_d3) {
		/*
		 * pci-core sets the device power state to an unknown value at
		 * bootup and after being removed from a driver.  The only
		 * transition it allows from this unknown state is to D0, which
		 * typically happens when a driver calls pci_enable_device().
		 * We're not ready to enable the device yet, but we do want to
		 * be able to get to D3.  Therefore first do a D0 transition
		 * before going to D3.
		 */
		pci_set_power_state(pdev, PCI_D0);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return ret;
}

static void vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_device *vdev;

	vdev = vfio_del_group_dev(&pdev->dev);
	if (!vdev)
		return;

	iommu_group_put(pdev->dev.iommu_group);
	kfree(vdev);

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, NULL, NULL, NULL);
		vga_set_legacy_decoding(pdev,
				VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	}

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D0);
}

static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct vfio_pci_device *vdev;
	struct vfio_device *device;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (device == NULL)
		return PCI_ERS_RESULT_DISCONNECT;

	vdev = vfio_device_data(device);
	if (vdev == NULL) {
		vfio_device_put(device);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	vfio_device_put(device);

	return PCI_ERS_RESULT_CAN_RECOVER;
}

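/*
 * AER errors are not handled here beyond notifying userspace: whoever
 * registered an eventfd on VFIO_PCI_ERR_IRQ_INDEX receives the signal and
 * decides what recovery, if any, to attempt.
 */
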
static const struct pci_error_handlers vfio_err_handlers = {
	.error_detected = vfio_pci_aer_err_detected,
};

static struct pci_driver vfio_pci_driver = {
	.name		= "vfio-pci",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vfio_pci_probe,
	.remove		= vfio_pci_remove,
	.err_handler	= &vfio_err_handlers,
};

struct vfio_devices {
	struct vfio_device **devices;
	int cur_index;
	int max_index;
};

static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = device;

	return 0;
}

/*
 * Attempt to do a bus/slot reset if there are devices affected by a reset for
 * this device that are needs_reset and all of the affected devices are unused
 * (!refcnt).  Callers are required to hold driver_lock when calling this to
 * prevent device opens and concurrent bus reset attempts.  We prevent device
 * unbinds by acquiring and holding a reference to the vfio_device.
 *
 * NB: vfio-core considers a group to be viable even if some devices are
 * bound to drivers like pci-stub or pcieport.  Here we require all devices
 * to be bound to vfio_pci since that's the only way we can be sure they
 * stay put.
 */
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
	struct vfio_devices devs = { .cur_index = 0 };
	int i = 0, ret = -EINVAL;
	bool needs_reset = false, slot = false;
	struct vfio_pci_device *tmp;

	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					  &i, slot) || !i)
		return;

	devs.max_index = i;
	devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
	if (!devs.devices)
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
					  vfio_pci_get_devs, &devs, slot))
		goto put_devs;

	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (tmp->needs_reset)
			needs_reset = true;
		if (tmp->refcnt)
			goto put_devs;
	}

	if (needs_reset)
		ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
			     pci_try_reset_bus(vdev->pdev->bus);

put_devs:
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (!ret)
			tmp->needs_reset = false;

		if (!tmp->refcnt && !disable_idle_d3)
			pci_set_power_state(tmp->pdev, PCI_D3hot);

		vfio_device_put(devs.devices[i]);
	}

	kfree(devs.devices);
}

static void __exit vfio_pci_cleanup(void)
{
	pci_unregister_driver(&vfio_pci_driver);
	vfio_pci_uninit_perm_bits();
}

static void __init vfio_pci_fill_ids(void)
{
	char *p, *id;
	int rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			pr_warn("invalid id string \"%s\"\n", id);
			continue;
		}

		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
				   subvendor, subdevice, class, class_mask, 0);
		if (rc)
			pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
				vendor, device, subvendor, subdevice,
				class, class_mask, rc);
		else
			pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
				vendor, device, subvendor, subdevice,
				class, class_mask);
	}
}

static int __init vfio_pci_init(void)
{
	int ret;

	/* Allocate shared config space permission data used by all devices */
	ret = vfio_pci_init_perm_bits();
	if (ret)
		return ret;

	/* Register and scan for devices */
	ret = pci_register_driver(&vfio_pci_driver);
	if (ret)
		goto out_driver;

	vfio_pci_fill_ids();

	return 0;

out_driver:
	vfio_pci_uninit_perm_bits();
	return ret;
}

module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);