/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/vfio.h>

#include "qemu/event_notifier.h"
#include "exec/address-spaces.h"
#include "sysemu/kvm.h"
#include "exec/memory.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/queue.h"
#include "qemu/range.h"
/* #define DEBUG_VFIO */
#ifdef DEBUG_VFIO
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "vfio: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
typedef struct VFIOBAR {
    off_t fd_offset; /* offset of BAR within device fd */
    int fd; /* device fd, allows us to pass VFIOBAR as opaque data */
    MemoryRegion mem; /* slow, read/write access */
    MemoryRegion mmap_mem; /* direct mapped access */
    void *mmap;
    size_t size;
    uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
    uint8_t nr; /* cache the BAR number for debug */
} VFIOBAR;

typedef struct VFIOINTx {
    bool pending; /* interrupt pending */
    bool kvm_accel; /* set when QEMU bypass through KVM enabled */
    uint8_t pin; /* which pin to pull for qemu_set_irq */
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
    PCIINTxRoute route; /* routing info for QEMU bypass */
    uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
    QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
} VFIOINTx;

typedef struct VFIOMSIVector {
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    struct VFIODevice *vdev; /* back pointer to device */
    int virq; /* KVM irqchip route for QEMU bypass */
    bool use;
} VFIOMSIVector;

enum {
    VFIO_INT_NONE = 0,
    VFIO_INT_INTx = 1,
    VFIO_INT_MSI  = 2,
    VFIO_INT_MSIX = 3,
};

typedef struct VFIOContainer {
    int fd; /* /dev/vfio/vfio, empowered by the attached groups */
    struct {
        /* enable abstraction to support various iommu backends */
        union {
            MemoryListener listener; /* Used by type1 iommu */
        };
        void (*release)(struct VFIOContainer *);
    } iommu_data;
    QLIST_HEAD(, VFIOGroup) group_list;
    QLIST_ENTRY(VFIOContainer) next;
} VFIOContainer;

/* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
typedef struct VFIOMSIXInfo {
    uint8_t table_bar;
    uint8_t pba_bar;
    uint16_t entries;
    uint32_t table_offset;
    uint32_t pba_offset;
    MemoryRegion mmap_mem;
    void *mmap;
} VFIOMSIXInfo;

typedef struct VFIODevice {
    PCIDevice pdev;
    int fd; /* device fd */
    VFIOINTx intx;
    unsigned int config_size;
    off_t config_offset; /* Offset of config space region within device fd */
    unsigned int rom_size;
    off_t rom_offset; /* Offset of ROM region within device fd */
    int msi_cap_size;
    VFIOMSIVector *msi_vectors;
    VFIOMSIXInfo *msix;
    int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
    int interrupt; /* Current interrupt type */
    VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
    PCIHostDeviceAddress host;
    QLIST_ENTRY(VFIODevice) next;
    struct VFIOGroup *group;
    bool reset_works;
} VFIODevice;

typedef struct VFIOGroup {
    int fd;
    int groupid;
    VFIOContainer *container;
    QLIST_HEAD(, VFIODevice) device_list;
    QLIST_ENTRY(VFIOGroup) next;
    QLIST_ENTRY(VFIOGroup) container_next;
} VFIOGroup;
#define MSIX_CAP_LENGTH 12

static QLIST_HEAD(, VFIOContainer)
    container_list = QLIST_HEAD_INITIALIZER(container_list);

static QLIST_HEAD(, VFIOGroup)
    group_list = QLIST_HEAD_INITIALIZER(group_list);
static void vfio_disable_interrupts(VFIODevice *vdev);
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled);
/*
 * Common VFIO interrupt disable
 */
static void vfio_disable_irqindex(VFIODevice *vdev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
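/*
 * All of the VFIO_DEVICE_SET_IRQS calls in this file follow the pattern
 * above: a struct vfio_irq_set header, optionally followed by a variable
 * length payload selected by the DATA flag.  A sketch of the eventfd
 * variant used repeatedly below (names here are illustrative only):
 *
 *   int argsz = sizeof(struct vfio_irq_set) + sizeof(int32_t);
 *   struct vfio_irq_set *set = g_malloc0(argsz);
 *   set->argsz = argsz;
 *   set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *   *(int32_t *)&set->data = eventfd;  // fd the kernel will signal
 *   ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 */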
static void vfio_unmask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#ifdef CONFIG_KVM /* Unused outside of CONFIG_KVM code */
static void vfio_mask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#endif
/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (vdev->intx.pending) {
        qemu_mod_timer(vdev->intx.mmap_timer,
                       qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}
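/*
 * A rough sketch of how the timeout is driven from the command line (the
 * host address below is only an example):
 *
 *   -device vfio-pci,host=0000:06:0d.0,x-intx-mmap-timeout-ms=0
 *
 * disables the timer entirely, leaving BARs trapped once an INTx fires
 * and until INTx is disabled, while the default of 1100ms re-enables the
 * mmap fast path shortly after the interrupt traffic goes idle.
 */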
static void vfio_intx_interrupt(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) Pin %c\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function,
            'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 1);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        qemu_mod_timer(vdev->intx.mmap_timer,
                       qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
    }
}
static void vfio_eoi(VFIODevice *vdev)
{
    if (!vdev->intx.pending) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) EOI\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
    vfio_unmask_intx(vdev);
}
static void vfio_enable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (!kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi\n");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m\n");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m\n");
        goto fail_vfio;
    }

    vfio_unmask_intx(vdev);

    vdev->intx.kvm_accel = true;

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel enabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_intx(vdev);
#endif
}
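/*
 * To summarize the wiring above: the device interrupt arrives on
 * intx.interrupt (irqfd.fd), which KVM injects directly into the guest;
 * when the guest EOIs, KVM signals intx.unmask (irqfd.resamplefd), which
 * VFIO has been told to treat as an unmask, re-arming the host line.
 * QEMU is out of the interrupt path entirely until the routing changes
 * or INTx is disabled.
 */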
static void vfio_disable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m\n");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_intx(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel disabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);
#endif
}
static void vfio_update_irq(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) IRQ moved %d -> %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->intx.route.irq, route.irq);

    vfio_disable_intx_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_enable_intx_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_eoi(vdev);
}
static int vfio_enable_intx(VFIODevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() &&
        kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed\n");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m\n");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        g_free(irq_set);
        return -errno;
    }
    g_free(irq_set);

    vfio_enable_intx_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    return 0;
}
static void vfio_disable_intx(VFIODevice *vdev)
{
    int fd;

    qemu_del_timer(vdev->intx.mmap_timer);
    vfio_disable_intx_kvm(vdev);
    vfio_disable_irqindex(vdev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIODevice *vdev = vector->vdev;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msix_notify(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msi_notify(&vdev->pdev, nr);
    } else {
        error_report("vfio: MSI interrupt received, but not enabled?\n");
    }
}
static int vfio_enable_vectors(VFIODevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        if (!vdev->msi_vectors[i].use) {
            fds[i] = -1;
            continue;
        }

        fds[i] = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
    }

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d used\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    vector = &vdev->msi_vectors[nr];
    vector->vdev = vdev;
    vector->use = true;

    msix_vector_use(pdev, nr);

    if (event_notifier_init(&vector->interrupt, 0)) {
        error_report("vfio: Error: event_notifier_init failed\n");
    }

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    vector->virq = msg ? kvm_irqchip_add_msi_route(kvm_state, *msg) : -1;
    if (vector->virq < 0 ||
        kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                       vector->virq) < 0) {
        if (vector->virq >= 0) {
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        }
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            handler, NULL, vector);
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shut them down and
     * incrementally increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d\n", ret);
        }
    } else {
        struct vfio_irq_set *irq_set;
        int argsz;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d\n", ret);
        }
    }

    return 0;
}
static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}
static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];
    struct vfio_irq_set *irq_set;
    int argsz;
    int32_t *pfd;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d released\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    /*
     * XXX What's the right thing to do here?  This turns off the interrupt
     * completely, but do we really just want to switch the interrupt to
     * bouncing through userspace and let msix.c drop it?  Not sure.
     */
    msix_vector_unuse(pdev, nr);

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
    irq_set->start = nr;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = -1;

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    if (vector->virq < 0) {
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            NULL, NULL, NULL);
    } else {
        kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
                                          vector->virq);
        kvm_irqchip_release_virq(kvm_state, vector->virq);
        vector->virq = -1;
    }

    event_notifier_cleanup(&vector->interrupt);
    vector->use = false;
}
static void vfio_enable_msix(VFIODevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed\n");
    }

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_enable_msi(VFIODevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));

    for (i = 0; i < vdev->nr_vectors; i++) {
        MSIMessage msg;
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed\n");
        }

        msg = msi_get_message(&vdev->pdev, i);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vector->virq = kvm_irqchip_add_msi_route(kvm_state, msg);
        if (vector->virq < 0 ||
            kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                           vector->virq) < 0) {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                vfio_msi_interrupt, NULL, vector);
        }
    }

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m\n");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d\n", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                kvm_irqchip_remove_irqfd_notifier(kvm_state,
                                                  &vector->interrupt,
                                                  vector->virq);
                kvm_irqchip_release_virq(kvm_state, vector->virq);
                vector->virq = -1;
            } else {
                qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                    NULL, NULL, NULL);
            }
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        return;
    }

    vdev->interrupt = VFIO_INT_MSI;

    DPRINTF("%s(%04x:%02x:%02x.%x) Enabled %d MSI vectors\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->nr_vectors);
}
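/*
 * A note on the retry path above: when the host cannot allocate the full
 * vector set, the ioctl reports how many vectors are actually available,
 * so everything is torn down and retried with that smaller count until
 * the host and guest views converge.
 */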
static void vfio_disable_msi_common(VFIODevice *vdev)
{
    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_enable_intx(vdev);
}
static void vfio_disable_msix(VFIODevice *vdev)
{
    msix_unset_vector_notifiers(&vdev->pdev);

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_disable_msi(VFIODevice *vdev)
{
    int i;

    vfio_disable_irqindex(vdev, VFIO_PCI_MSI_IRQ_INDEX);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        if (!vector->use) {
            continue;
        }

        if (vector->virq >= 0) {
            kvm_irqchip_remove_irqfd_notifier(kvm_state,
                                              &vector->interrupt, vector->virq);
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        } else {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
        }

        event_notifier_cleanup(&vector->interrupt);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
static void vfio_bar_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes\n", size);
        break;
    }

    if (pwrite(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m\n",
                     __func__, addr, data, size);
    }

    DPRINTF("%s(BAR%d+0x%"HWADDR_PRIx", 0x%"PRIx64", %d)\n",
            __func__, bar->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));
}
static uint64_t vfio_bar_read(void *opaque,
                              hwaddr addr, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m\n",
                     __func__, addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    DPRINTF("%s(BAR%d+0x%"HWADDR_PRIx", %d) = 0x%"PRIx64"\n",
            __func__, bar->nr, addr, size, data);

    /* Same as write above */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));

    return data;
}
static const MemoryRegionOps vfio_bar_ops = {
    .read = vfio_bar_read,
    .write = vfio_bar_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
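/*
 * These ops implement the "slow", trapped access path backing bar->mem.
 * When vfio_mmap_set_enabled() enables the overlapping bar->mmap_mem
 * subregion, guest accesses bypass these handlers entirely and go
 * straight to the mmap of the device region.
 */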
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t val = 0;

    /*
     * We only need QEMU PCI config support for the ROM BAR, the MSI and MSIX
     * capabilities, and the multifunction bit below.  We let VFIO handle
     * virtualizing everything else.  Performance is not a concern here.
     */
    if (ranges_overlap(addr, len, PCI_ROM_ADDRESS, 4) ||
        (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
         ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) ||
        (pdev->cap_present & QEMU_PCI_CAP_MSI &&
         ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size))) {
        val = pci_default_read_config(pdev, addr, len);
    } else {
        if (pread(vdev->fd, &val, len, vdev->config_offset + addr) != len) {
            error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m\n",
                         __func__, vdev->host.domain, vdev->host.bus,
                         vdev->host.slot, vdev->host.function, addr, len);
            return -errno;
        }
        val = le32_to_cpu(val);
    }

    /* Multifunction bit is virtualized in QEMU */
    if (unlikely(ranges_overlap(addr, len, PCI_HEADER_TYPE, 1))) {
        uint32_t mask = PCI_HEADER_TYPE_MULTI_FUNCTION;

        /* Shift the mask to the byte of val holding the header type */
        mask <<= (PCI_HEADER_TYPE - addr) * 8;

        if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
            val |= mask;
        } else {
            val &= ~mask;
        }
    }

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, len=0x%x) %x\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, len, val);

    return val;
}
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t val_le = cpu_to_le32(val);

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, 0x%x, len=0x%x)\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->fd, &val_le, len, vdev->config_offset + addr) != len) {
        error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m\n",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function, addr, val, len);
    }

    /* Write standard header bits to emulation */
    if (addr < PCI_CONFIG_HEADER_SIZE) {
        pci_default_write_config(pdev, addr, val, len);
        return;
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_enable_msi(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_disable_msi(vdev);
        }
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
        ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_enable_msix(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_disable_msix(vdev);
        }
    }
}
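/*
 * A concrete example of the interplay above: when a Linux guest writes
 * the MSI enable bit, the write goes out to hardware (with VFIO filtering
 * what may not be written), pci_default_write_config() updates QEMU's
 * emulated capability, msi_enabled() flips from false to true, and
 * vfio_enable_msi() then switches the physical device over to
 * eventfd-backed MSI.
 */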
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        DPRINTF("VFIO_UNMAP_DMA: %d\n", -errno);
        return -errno;
    }

    return 0;
}
static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    DPRINTF("VFIO_MAP_DMA: %d\n", -errno);
    return -errno;
}
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return !memory_region_is_ram(section->mr);
}
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.listener);
    hwaddr iova, end;
    void *vaddr;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("vfio: SKIPPING region_add %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space + section->size - 1);
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region\n", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + section->size) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    DPRINTF("vfio: region_add %"HWADDR_PRIx" - %"HWADDR_PRIx" [%p]\n",
            iova, end - 1, vaddr);

    ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)\n",
                     container, iova, end - iova, vaddr, ret);
    }
}
static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.listener);
    hwaddr iova, end;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("vfio: SKIPPING region_del %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space + section->size - 1);
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region\n", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + section->size) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    DPRINTF("vfio: region_del %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            iova, end - 1);

    ret = vfio_dma_unmap(container, iova, end - iova);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)\n",
                     container, iova, end - iova, ret);
    }
}
static MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};
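/*
 * Registering this listener (see vfio_connect_container) walks every RAM
 * region in the guest address space, so the net effect is an identity
 * guest-physical-to-IOVA map: the device can DMA using guest physical
 * addresses and the IOMMU translates them to the pinned host pages.
 */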
static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->iommu_data.listener);
}
static void vfio_disable_interrupts(VFIODevice *vdev)
{
    switch (vdev->interrupt) {
    case VFIO_INT_INTx:
        vfio_disable_intx(vdev);
        break;
    case VFIO_INT_MSI:
        vfio_disable_msi(vdev);
        break;
    case VFIO_INT_MSIX:
        vfio_disable_msix(vdev);
        break;
    }
}
static int vfio_setup_msi(VFIODevice *vdev, int pos)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;

    if (pread(vdev->fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    DPRINTF("%04x:%02x:%02x.%x PCI MSI CAP @0x%x\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msi_init failed\n");
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}
/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to setup MSI-X we need a
 * MemoryRegion for the BAR.  In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static int vfio_early_setup_msix(VFIODevice *vdev)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return 0;
    }

    if (pread(vdev->fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        return -errno;
    }

    if (pread(vdev->fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        return -errno;
    }

    if (pread(vdev->fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        return -errno;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    vdev->msix = g_malloc0(sizeof(*(vdev->msix)));
    vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    DPRINTF("%04x:%02x:%02x.%x "
            "PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, pos, vdev->msix->table_bar,
            vdev->msix->table_offset, vdev->msix->entries);

    return 0;
}
static int vfio_setup_msix(VFIODevice *vdev, int pos)
{
    int ret;

    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    &vdev->bars[vdev->msix->table_bar].mem,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    &vdev->bars[vdev->msix->pba_bar].mem,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_report("vfio: msix_init failed\n");
        return ret;
    }

    return 0;
}
static void vfio_teardown_msi(VFIODevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev, &vdev->bars[vdev->msix->table_bar].mem,
                    &vdev->bars[vdev->msix->pba_bar].mem);
    }
}
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        if (!bar->size) {
            continue;
        }

        memory_region_set_enabled(&bar->mmap_mem, enabled);
        if (vdev->msix && vdev->msix->table_bar == i) {
            memory_region_set_enabled(&vdev->msix->mmap_mem, enabled);
        }
    }
}
static void vfio_unmap_bar(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    if (!bar->size) {
        return;
    }

    memory_region_del_subregion(&bar->mem, &bar->mmap_mem);
    munmap(bar->mmap, memory_region_size(&bar->mmap_mem));

    if (vdev->msix && vdev->msix->table_bar == nr) {
        memory_region_del_subregion(&bar->mem, &vdev->msix->mmap_mem);
        munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem));
    }

    memory_region_destroy(&bar->mem);
}
static int vfio_mmap_bar(VFIOBAR *bar, MemoryRegion *mem, MemoryRegion *submem,
                         void **map, size_t size, off_t offset,
                         const char *name)
{
    int ret = 0;

    if (size && bar->flags & VFIO_REGION_INFO_FLAG_MMAP) {
        int prot = 0;

        if (bar->flags & VFIO_REGION_INFO_FLAG_READ) {
            prot |= PROT_READ;
        }

        if (bar->flags & VFIO_REGION_INFO_FLAG_WRITE) {
            prot |= PROT_WRITE;
        }

        *map = mmap(NULL, size, prot, MAP_SHARED,
                    bar->fd, bar->fd_offset + offset);
        if (*map == MAP_FAILED) {
            *map = NULL;
            ret = -errno;
            goto empty_region;
        }

        memory_region_init_ram_ptr(submem, name, size, *map);
    } else {
empty_region:
        /* Create a zero sized sub-region to make cleanup easy. */
        memory_region_init(submem, name, 0);
    }

    memory_region_add_subregion(mem, offset, submem);

    return ret;
}
static void vfio_map_bar(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];
    unsigned size = bar->size;
    char name[64];
    uint32_t pci_bar;
    uint8_t type;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!size) {
        return;
    }

    snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function, nr);

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)\n", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    type = pci_bar & (pci_bar & PCI_BASE_ADDRESS_SPACE_IO ?
           ~PCI_BASE_ADDRESS_IO_MASK : ~PCI_BASE_ADDRESS_MEM_MASK);

    /* A "slow" read/write mapping underlies all BARs */
    memory_region_init_io(&bar->mem, &vfio_bar_ops, bar, name, size);
    pci_register_bar(&vdev->pdev, nr, type, &bar->mem);

    /*
     * We can't mmap areas overlapping the MSIX vector table, so we
     * potentially insert a direct-mapped subregion before and after it.
     */
    if (vdev->msix && vdev->msix->table_bar == nr) {
        size = vdev->msix->table_offset & TARGET_PAGE_MASK;
    }

    strncat(name, " mmap", sizeof(name) - strlen(name) - 1);
    if (vfio_mmap_bar(bar, &bar->mem,
                      &bar->mmap_mem, &bar->mmap, size, 0, name)) {
        error_report("%s unsupported. Performance may be slow\n", name);
    }

    if (vdev->msix && vdev->msix->table_bar == nr) {
        unsigned start;

        start = TARGET_PAGE_ALIGN(vdev->msix->table_offset +
                                  (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

        size = start < bar->size ? bar->size - start : 0;
        strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1);
        /* VFIOMSIXInfo contains another MemoryRegion for this mapping */
        if (vfio_mmap_bar(bar, &bar->mem, &vdev->msix->mmap_mem,
                          &vdev->msix->mmap, size, start, name)) {
            error_report("%s unsupported. Performance may be slow\n", name);
        }
    }
}
static void vfio_map_bars(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_map_bar(vdev, i);
    }
}
static void vfio_unmap_bars(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_unmap_bar(vdev, i);
    }
}
static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp, next = 0xff;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + 1]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}
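/*
 * Worked example for the sizing above: with capabilities at 0x40, 0x50
 * and 0x60, a query for pos == 0x40 finds 0x50 as the nearest following
 * capability and returns 0x10; for the last capability in the chain no
 * larger offset exists, so next stays 0xff and the capability is sized
 * to the end of standard config space.
 */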
static int vfio_add_std_cap(VFIODevice *vdev, uint8_t pos)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    int ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + 1];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain.  Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        ret = vfio_add_std_cap(vdev, next);
        if (ret) {
            return ret;
        }
    } else {
        pdev->config[PCI_CAPABILITY_LIST] = 0; /* Begin the rebuild */
    }

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_setup_msi(vdev, pos);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_setup_msix(vdev, pos);
        break;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size);
        break;
    }

    if (ret < 0) {
        error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability "
                     "0x%x[0x%x]@0x%x: %d\n", vdev->host.domain,
                     vdev->host.bus, vdev->host.slot, vdev->host.function,
                     cap_id, size, pos, ret);
        return ret;
    }

    return 0;
}
static int vfio_add_capabilities(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return 0; /* Nothing to add */
    }

    return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
}
static int vfio_load_rom(VFIODevice *vdev)
{
    uint64_t size = vdev->rom_size;
    char name[32];
    off_t off = 0, voff = vdev->rom_offset;
    ssize_t bytes;
    void *ptr;

    /* If loading ROM from file, pci handles it */
    if (vdev->pdev.romfile || !vdev->pdev.rom_bar || !size) {
        return 0;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    memory_region_init_ram(&vdev->pdev.rom, name, size);
    ptr = memory_region_get_ram_ptr(&vdev->pdev.rom);
    memset(ptr, 0xff, size);

    while (size) {
        bytes = pread(vdev->fd, ptr + off, size, voff + off);
        if (bytes == 0) {
            break; /* expect that we could get back less than the ROM BAR */
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m\n");
            memory_region_destroy(&vdev->pdev.rom);
            return -errno;
        }
    }

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT, 0, &vdev->pdev.rom);
    vdev->pdev.has_rom = true;
    return 0;
}
static int vfio_connect_container(VFIOGroup *group)
{
    VFIOContainer *container;
    int ret, fd;

    if (group->container) {
        return 0;
    }

    QLIST_FOREACH(container, &container_list, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_report("vfio: failed to open /dev/vfio/vfio: %m\n");
        return -errno;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_report("vfio: supported vfio version: %d, "
                     "reported version: %d\n", VFIO_API_VERSION, ret);
        close(fd);
        return -EINVAL;
    }

    container = g_malloc0(sizeof(*container));
    container->fd = fd;

    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_report("vfio: failed to set group container: %m\n");
            g_free(container);
            close(fd);
            return -errno;
        }

        ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
        if (ret) {
            error_report("vfio: failed to set iommu for container: %m\n");
            g_free(container);
            close(fd);
            return -errno;
        }

        container->iommu_data.listener = vfio_memory_listener;
        container->iommu_data.release = vfio_listener_release;

        memory_listener_register(&container->iommu_data.listener,
                                 &address_space_memory);
    } else {
        error_report("vfio: No available IOMMU models\n");
        g_free(container);
        close(fd);
        return -EINVAL;
    }

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&container_list, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;
}
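/*
 * The ownership model sketched by the function above: each device belongs
 * to exactly one IOMMU group (/dev/vfio/$GROUP), any number of groups may
 * share one container (/dev/vfio/vfio), and DMA mappings are programmed
 * per container, so co-resident groups share a single IOMMU address space.
 */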
static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container\n",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        if (container->iommu_data.release) {
            container->iommu_data.release(container);
        }
        QLIST_REMOVE(container, next);
        DPRINTF("vfio_disconnect_container: close container->fd\n");
        close(container->fd);
        g_free(container);
    }
}
static VFIOGroup *vfio_get_group(int groupid)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &group_list, next) {
        if (group->groupid == groupid) {
            return group;
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_report("vfio: error opening %s: %m\n", path);
        g_free(group);
        return NULL;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_report("vfio: error getting group status: %m\n");
        close(group->fd);
        g_free(group);
        return NULL;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_report("vfio: error, group %d is not viable, please ensure "
                     "all devices within the iommu_group are bound to their "
                     "vfio bus driver.\n", groupid);
        close(group->fd);
        g_free(group);
        return NULL;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group)) {
        error_report("vfio: failed to setup container for group %d\n", groupid);
        close(group->fd);
        g_free(group);
        return NULL;
    }

    QLIST_INSERT_HEAD(&group_list, group, next);

    return group;
}
static void vfio_put_group(VFIOGroup *group)
{
    if (!QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    DPRINTF("vfio_put_group: close group->fd\n");
    close(group->fd);
    g_free(group);
}
static int vfio_get_device(VFIOGroup *group, const char *name, VFIODevice *vdev)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
    int ret, i;

    ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (ret < 0) {
        error_report("vfio: error getting device %s from group %d: %m\n",
                     name, group->groupid);
        error_report("Verify all devices in group %d are bound to vfio-pci "
                     "or pci-stub and not already in use\n", group->groupid);
        return ret;
    }

    vdev->fd = ret;
    vdev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vdev, next);

    /* Sanity check device */
    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_report("vfio: error getting device info: %m\n");
        goto error;
    }

    DPRINTF("Device %s flags: %u, regions: %u, irqs: %u\n", name,
            dev_info.flags, dev_info.num_regions, dev_info.num_irqs);

    if (!(dev_info.flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_report("vfio: Um, this isn't a PCI device\n");
        ret = -EINVAL;
        goto error;
    }

    vdev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    if (!vdev->reset_works) {
        error_report("Warning, device %s does not support reset\n", name);
    }

    if (dev_info.num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
        error_report("vfio: unexpected number of io regions %u\n",
                     dev_info.num_regions);
        ret = -EINVAL;
        goto error;
    }

    if (dev_info.num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
        error_report("vfio: unexpected number of irqs %u\n", dev_info.num_irqs);
        ret = -EINVAL;
        goto error;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        reg_info.index = i;

        ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
        if (ret) {
            error_report("vfio: Error getting region %d info: %m\n", i);
            goto error;
        }

        DPRINTF("Device %s region %d:\n", name, i);
        DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
                (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
                (unsigned long)reg_info.flags);

        vdev->bars[i].flags = reg_info.flags;
        vdev->bars[i].size = reg_info.size;
        vdev->bars[i].fd_offset = reg_info.offset;
        vdev->bars[i].fd = vdev->fd;
        vdev->bars[i].nr = i;
    }

    reg_info.index = VFIO_PCI_ROM_REGION_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
    if (ret) {
        error_report("vfio: Error getting ROM info: %m\n");
        goto error;
    }

    DPRINTF("Device %s ROM:\n", name);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->rom_size = reg_info.size;
    vdev->rom_offset = reg_info.offset;

    reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX;

    ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
    if (ret) {
        error_report("vfio: Error getting config info: %m\n");
        goto error;
    }

    DPRINTF("Device %s config:\n", name);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->config_size = reg_info.size;
    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }
    vdev->config_offset = reg_info.offset;

error:
    if (ret) {
        QLIST_REMOVE(vdev, next);
        vdev->group = NULL;
        close(vdev->fd);
    }
    return ret;
}
static void vfio_put_device(VFIODevice *vdev)
{
    QLIST_REMOVE(vdev, next);
    vdev->group = NULL;
    DPRINTF("vfio_put_device: close vdev->fd\n");
    close(vdev->fd);
    if (vdev->msix) {
        g_free(vdev->msix);
        vdev->msix = NULL;
    }
}
static int vfio_initfn(PCIDevice *pdev)
{
    VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group;
    char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* Check that the host device exists */
    snprintf(path, sizeof(path),
             "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);
    if (stat(path, &st) < 0) {
        error_report("vfio: error: no such host device: %s\n", path);
        return -errno;
    }

    strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);

    len = readlink(path, iommu_group_path, PATH_MAX);
    if (len <= 0) {
        error_report("vfio: error no iommu_group for device\n");
        return -errno;
    }

    iommu_group_path[len] = 0;
    group_name = basename(iommu_group_path);

    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_report("vfio: error reading %s: %m\n", path);
        return -errno;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);

    group = vfio_get_group(groupid);
    if (!group) {
        error_report("vfio: failed to get group %d\n", groupid);
        return -ENOENT;
    }

    snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    QLIST_FOREACH(pvdev, &group->device_list, next) {
        if (pvdev->host.domain == vdev->host.domain &&
            pvdev->host.bus == vdev->host.bus &&
            pvdev->host.slot == vdev->host.slot &&
            pvdev->host.function == vdev->host.function) {

            error_report("vfio: error: device %s is already attached\n", path);
            vfio_put_group(group);
            return -EBUSY;
        }
    }

    ret = vfio_get_device(group, path, vdev);
    if (ret) {
        error_report("vfio: failed to get device %s\n", path);
        vfio_put_group(group);
        return ret;
    }

    /* Get a copy of config space */
    ret = pread(vdev->fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_report("vfio: Failed to read device config space\n");
        goto out_put;
    }

    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_load_rom(vdev);

    ret = vfio_early_setup_msix(vdev);
    if (ret) {
        goto out_put;
    }

    vfio_map_bars(vdev);

    ret = vfio_add_capabilities(vdev);
    if (ret) {
        goto out_teardown;
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = qemu_new_timer_ms(vm_clock,
                                                  vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
        ret = vfio_enable_intx(vdev);
        if (ret) {
            goto out_teardown;
        }
    }

    return 0;

out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
out_put:
    vfio_put_device(vdev);
    vfio_put_group(group);
    return ret;
}
static void vfio_exitfn(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOGroup *group = vdev->group;

    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        qemu_free_timer(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_unmap_bars(vdev);
    vfio_put_device(vdev);
    vfio_put_group(group);
}
static void vfio_pci_reset(DeviceState *dev)
{
    PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint16_t cmd;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vfio_disable_interrupts(vdev);

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);

    if (vdev->reset_works) {
        if (ioctl(vdev->fd, VFIO_DEVICE_RESET)) {
            error_report("vfio: Error unable to reset physical device "
                         "(%04x:%02x:%02x.%x): %m\n", vdev->host.domain,
                         vdev->host.bus, vdev->host.slot, vdev->host.function);
        }
    }

    vfio_enable_intx(vdev);
}
static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIODevice, host),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIODevice,
                       intx.mmap_timeout, 1100),
    /*
     * TODO - support passed fds... is this necessary?
     * DEFINE_PROP_STRING("vfiofd", VFIODevice, vfiofd_name),
     * DEFINE_PROP_STRING("vfiogroupfd, VFIODevice, vfiogroupfd_name),
     */
    DEFINE_PROP_END_OF_LIST(),
};
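/*
 * A minimal invocation using these properties, assuming the device has
 * already been unbound from its host driver and bound to vfio-pci (the
 * address below is only an example):
 *
 *   qemu-system-x86_64 ... -device vfio-pci,host=0000:06:0d.0
 */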
static const VMStateDescription vfio_pci_vmstate = {
    .name = "vfio-pci",
    .unmigratable = 1,
};
static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    dc->reset = vfio_pci_reset;
    dc->props = vfio_pci_dev_properties;
    dc->vmsd = &vfio_pci_vmstate;
    dc->desc = "VFIO-based PCI device assignment";
    pdc->init = vfio_initfn;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
    pdc->is_express = 1; /* We might be */
}
static const TypeInfo vfio_pci_dev_info = {
    .name = "vfio-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIODevice),
    .class_init = vfio_pci_dev_class_init,
};

static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
}

type_init(register_vfio_pci_dev_type)