/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include CONFIG_DEVICES /* CONFIG_IOMMUFD */
#include <linux/vfio.h>
#include <sys/ioctl.h>

#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci_bridge.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "migration/vmstate.h"
#include "qapi/qmp/qdict.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/range.h"
#include "qemu/units.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "pci.h"
#include "trace.h"
#include "qapi/error.h"
#include "migration/blocker.h"
#include "migration/qemu-file.h"
#include "sysemu/iommufd.h"

#define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug"

/* Protected by BQL */
static KVMRouteChange vfio_route_change;

static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
static void vfio_msi_disable_common(VFIOPCIDevice *vdev);

/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}

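/*
 * Fires when the host signals INTx via the eventfd: mark the interrupt
 * pending, assert the guest IRQ, and trap BAR accesses until EOI (see
 * the mmap_timeout discussion above).
 */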
static void vfio_intx_interrupt(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}

static void vfio_intx_eoi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vdev->intx.pending) {
        return;
    }

    trace_vfio_intx_eoi(vbasedev->name);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}

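/*
 * KVM irqfd acceleration for INTx: wire the VFIO interrupt eventfd
 * directly into the KVM irqchip and use an unmask eventfd so EOIs
 * resample the interrupt without bouncing through QEMU.
 */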
static bool vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
{
#ifdef CONFIG_KVM
    int irq_fd = event_notifier_get_fd(&vdev->intx.interrupt);

    if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_resamplefds_enabled()) {
        return true;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irq_fd, NULL, NULL, vdev);
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_setg(errp, "event_notifier_init failed eoi");
        goto fail;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
                                           &vdev->intx.interrupt,
                                           &vdev->intx.unmask,
                                           vdev->intx.route.irq)) {
        error_setg_errno(errp, errno, "failed to setup resample irqfd");
        goto fail_irqfd;
    }

    if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
                                VFIO_IRQ_SET_ACTION_UNMASK,
                                event_notifier_get_fd(&vdev->intx.unmask),
                                errp)) {
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    vdev->intx.kvm_accel = true;

    trace_vfio_intx_enable_kvm(vdev->vbasedev.name);

    return true;

fail_vfio:
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
                                          vdev->intx.route.irq);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irq_fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    return false;
#else
    return true;
#endif
}

static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt,
                                              vdev->intx.route.irq)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
                        vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
#endif
}

static void vfio_intx_update(VFIOPCIDevice *vdev, PCIINTxRoute *route)
{
    Error *err = NULL;

    trace_vfio_intx_update(vdev->vbasedev.name,
                           vdev->intx.route.irq, route->irq);

    vfio_intx_disable_kvm(vdev);

    vdev->intx.route = *route;

    if (route->mode != PCI_INTX_ENABLED) {
        return;
    }

    if (!vfio_intx_enable_kvm(vdev, &err)) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_intx_eoi(&vdev->vbasedev);
}

static void vfio_intx_routing_notifier(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (pci_intx_route_changed(&vdev->intx.route, &route)) {
        vfio_intx_update(vdev, &route);
    }
}

static void vfio_irqchip_change(Notifier *notify, void *data)
{
    VFIOPCIDevice *vdev = container_of(notify, VFIOPCIDevice,
                                       irqchip_change_notifier);

    vfio_intx_update(vdev, &vdev->intx.route);
}

static bool vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    Error *err = NULL;
    int32_t fd;
    int ret;

    if (!pin) {
        return true;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_setg_errno(errp, -ret, "event_notifier_init failed");
        return false;
    }
    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, vfio_intx_interrupt, NULL, vdev);

    if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
                                VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) {
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return false;
    }

    if (!vfio_intx_enable_kvm(vdev, &err)) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    vdev->interrupt = VFIO_INT_INTx;

    trace_vfio_intx_enable(vdev->vbasedev.name);

    return true;
}

static void vfio_intx_disable(VFIOPCIDevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_intx_disable_kvm(vdev);
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    trace_vfio_intx_disable(vdev->vbasedev.name);
}

/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIOPCIDevice *vdev = vector->vdev;
    MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
    void (*notify)(PCIDevice *dev, unsigned vector);
    MSIMessage msg;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    if (vdev->interrupt == VFIO_INT_MSIX) {
        get_msg = msix_get_message;
        notify = msix_notify;

        /* A masked vector firing needs to use the PBA, enable it */
        if (msix_is_masked(&vdev->pdev, nr)) {
            set_bit(nr, vdev->msix->pending);
            memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
            trace_vfio_msix_pba_enable(vdev->vbasedev.name);
        }
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        get_msg = msi_get_message;
        notify = msi_notify;
    } else {
        abort();
    }

    msg = get_msg(&vdev->pdev, nr);
    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
    notify(&vdev->pdev, nr);
}

/*
 * Get MSI-X enabled, but no vector enabled, by setting vector 0 with an invalid
 * fd to kernel.
 */
static int vfio_enable_msix_no_vec(VFIOPCIDevice *vdev)
{
    g_autofree struct vfio_irq_set *irq_set = NULL;
    int ret = 0, argsz;
    int32_t *fd;

    argsz = sizeof(*irq_set) + sizeof(*fd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    fd = (int32_t *)&irq_set->data;
    *fd = -1;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    return ret;
}

static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    /*
     * If dynamic MSI-X allocation is supported, the vectors to be allocated
     * and enabled can be scattered.  Before the kernel enables MSI-X, setting
     * nr_vectors causes all of these vectors to be allocated on the host.
     *
     * To keep allocation as needed, use vector 0 with an invalid fd to get
     * MSI-X enabled first, then set vectors with a potentially sparse set of
     * eventfds to enable interrupts only when enabled in the guest.
     */
    if (msix && !vdev->msix->noresize) {
        ret = vfio_enable_msix_no_vec(vdev);

        if (ret) {
            return ret;
        }
    }

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int32_t fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, therefore we always use the KVM signaling path when setup.
         * MSI-X mask and pending bits are emulated, so we want to use the
         * KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}

static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
                                  int vector_n, bool msix)
{
    if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
        return;
    }

    vector->virq = kvm_irqchip_add_msi_route(&vfio_route_change,
                                             vector_n, &vdev->pdev);
}

static void vfio_connect_kvm_msi_virq(VFIOMSIVector *vector)
{
    if (vector->virq < 0) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        goto fail_notifier;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                           NULL, vector->virq) < 0) {
        goto fail_kvm;
    }

    return;

fail_kvm:
    event_notifier_cleanup(&vector->kvm_interrupt);
fail_notifier:
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
}

static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                          vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}

static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
                                     PCIDevice *pdev)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
    kvm_irqchip_commit_routes(kvm_state);
}

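/*
 * Enable an MSI-X vector: back it with an eventfd and, where possible,
 * route it through the KVM irqchip, falling back to QEMU signaling.
 */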
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    VFIOMSIVector *vector;
    int ret;
    bool resizing = !!(vdev->nr_vectors < nr + 1);

    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg, pdev);
        }
    } else {
        if (msg) {
            if (vdev->defer_kvm_irq_routing) {
                vfio_add_kvm_msi_virq(vdev, vector, nr, true);
            } else {
                vfio_route_change = kvm_irqchip_begin_route_changes(kvm_state);
                vfio_add_kvm_msi_virq(vdev, vector, nr, true);
                kvm_irqchip_commit_route_changes(&vfio_route_change);
                vfio_connect_kvm_msi_virq(vector);
            }
        }
    }

    /*
     * When dynamic allocation is not supported, we don't want to have the
     * host allocate all possible MSI vectors for a device if they're not
     * in use, so we shut down and incrementally increase them as needed.
     * nr_vectors represents the total number of vectors allocated.
     *
     * When dynamic allocation is supported, let the host only allocate
     * and enable a vector when it is in use in the guest.  nr_vectors
     * represents the upper bound of vectors being enabled (but not all of
     * the range is allocated or enabled).
     */
    if (resizing) {
        vdev->nr_vectors = nr + 1;
    }

    if (!vdev->defer_kvm_irq_routing) {
        if (vdev->msix->noresize && resizing) {
            vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
            ret = vfio_enable_vectors(vdev, true);
            if (ret) {
                error_report("vfio: failed to enable vectors, %d", ret);
            }
        } else {
            Error *err = NULL;
            int32_t fd;

            if (vector->virq >= 0) {
                fd = event_notifier_get_fd(&vector->kvm_interrupt);
            } else {
                fd = event_notifier_get_fd(&vector->interrupt);
            }

            if (!vfio_set_irq_signaling(&vdev->vbasedev,
                                        VFIO_PCI_MSIX_IRQ_INDEX, nr,
                                        VFIO_IRQ_SET_ACTION_TRIGGER, fd,
                                        &err)) {
                error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
            }
        }
    }

    /* Disable PBA emulation when nothing more is pending. */
    clear_bit(nr, vdev->msix->pending);
    if (find_first_bit(vdev->msix->pending,
                       vdev->nr_vectors) == vdev->nr_vectors) {
        memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
        trace_vfio_msix_pba_disable(vdev->vbasedev.name);
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int32_t fd = event_notifier_get_fd(&vector->interrupt);
        Error *err = NULL;

        if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX,
                                    nr, VFIO_IRQ_SET_ACTION_TRIGGER, fd,
                                    &err)) {
            error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        }
    }
}

static void vfio_prepare_kvm_msi_virq_batch(VFIOPCIDevice *vdev)
{
    assert(!vdev->defer_kvm_irq_routing);
    vdev->defer_kvm_irq_routing = true;
    vfio_route_change = kvm_irqchip_begin_route_changes(kvm_state);
}

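/* Commit the deferred KVM routes, then hook up irqfds for every vector. */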
static void vfio_commit_kvm_msi_virq_batch(VFIOPCIDevice *vdev)
{
    int i;

    assert(vdev->defer_kvm_irq_routing);
    vdev->defer_kvm_irq_routing = false;

    kvm_irqchip_commit_route_changes(&vfio_route_change);

    for (i = 0; i < vdev->nr_vectors; i++) {
        vfio_connect_kvm_msi_virq(&vdev->msi_vectors[i]);
    }
}

static void vfio_msix_enable(VFIOPCIDevice *vdev)
{
    int ret;

    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Setting vector notifiers triggers synchronous vector-use
     * callbacks for each active vector.  Deferring to commit the KVM
     * routes once rather than per vector provides a substantial
     * performance improvement.
     */
    vfio_prepare_kvm_msi_virq_batch(vdev);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    vfio_commit_kvm_msi_virq_batch(vdev);

    if (vdev->nr_vectors) {
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        /*
         * Some communication channels between VF & PF or PF & fw rely on the
         * physical state of the device and expect that enabling MSI-X from the
         * guest enables the same on the host.  When our guest is Linux, the
         * guest driver call to pci_enable_msix() sets the enabling bit in the
         * MSI-X capability, but leaves the vector table masked.  We therefore
         * can't rely on a vector_use callback (from request_irq() in the guest)
         * to switch the physical device into MSI-X mode because that may come a
         * long time after pci_enable_msix().  This code sets vector 0 with an
         * invalid fd to make the physical device MSI-X enabled, but with no
         * vectors enabled, just like the guest view.
         */
        ret = vfio_enable_msix_no_vec(vdev);
        if (ret) {
            error_report("vfio: failed to enable MSI-X, %d", ret);
        }
    }

    trace_vfio_msix_enable(vdev->vbasedev.name);
}

static void vfio_msi_enable(VFIOPCIDevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    /*
     * Setting vector notifiers needs to enable route for each vector.
     * Deferring to commit the KVM routes once rather than per vector
     * provides a substantial performance improvement.
     */
    vfio_prepare_kvm_msi_virq_batch(vdev);

    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vdev, vector, i, false);
    }

    vfio_commit_kvm_msi_virq_batch(vdev);

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        vfio_msi_disable_common(vdev);

        if (ret > 0) {
            vdev->nr_vectors = ret;
            goto retry;
        }

        /*
         * Failing to setup MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");

        return;
    }

    trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
}

static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;
}

static void vfio_msix_disable(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, check through the rest and release it ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    /*
     * Always clear MSI-X IRQ index.  A PF device could have enabled
     * MSI-X with no vectors.  See vfio_msix_enable().
     */
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);

    vfio_msi_disable_common(vdev);
    if (!vfio_intx_enable(vdev, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    memset(vdev->msix->pending, 0,
           BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));

    trace_vfio_msix_disable(vdev->vbasedev.name);
}

static void vfio_msi_disable(VFIOPCIDevice *vdev)
{
    Error *err = NULL;

    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_msi_disable_common(vdev);
    vfio_intx_enable(vdev, &err);
    if (err) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    trace_vfio_msi_disable(vdev->vbasedev.name);
}

static void vfio_update_msi(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
    }
}

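/*
 * ROM loading: the device ROM is read once from the VFIO region into a
 * QEMU-internal copy, which then backs guest reads (see vfio_rom_read()).
 */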
static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
    g_autofree struct vfio_region_info *reg_info = NULL;
    uint64_t size;
    off_t off = 0;
    ssize_t bytes;

    if (vfio_get_region_info(&vdev->vbasedev,
                             VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
                            (unsigned long)reg_info->offset,
                            (unsigned long)reg_info->flags);

    vdev->rom_size = size = reg_info->size;
    vdev->rom_offset = reg_info->offset;

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                    "%s", vdev->vbasedev.name);
        error_printf("Device option ROM contents are probably invalid "
                    "(check dmesg).\nSkip option ROM probe with rombar=0, "
                    "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
                      size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }

    /*
     * Test the ROM signature against our device, if the vendor is correct
     * but the device ID doesn't match, store the correct device ID and
     * recompute the checksum.  Intel IGD devices need this and are known
     * to have bogus checksums so we can't simply adjust the checksum.
     */
    if (pci_get_word(vdev->rom) == 0xaa55 &&
        pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
        !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
        uint16_t vid, did;

        vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
        did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);

        if (vid == vdev->vendor_id && did != vdev->device_id) {
            int i;
            uint8_t csum, *data = vdev->rom;

            pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
                         vdev->device_id);
            data[6] = 0;

            for (csum = 0, i = 0; i < vdev->rom_size; i++) {
                csum += data[i];
            }

            data[6] = -csum;
        }
    }
}

static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val;
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);

    return data;
}

static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char *name;
    int fd = vdev->vbasedev.fd;

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_opt_rom_in_denylist(vdev) && vdev->pdev.romfile) {
            warn_report("Device at %s is known to cause system instability"
                        " issues during option rom execution",
                        vdev->vbasedev.name);
            error_printf("Proceeding anyway since user specified romfile\n");
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(fd, &orig, 4, offset) != 4 ||
        pwrite(fd, &size, 4, offset) != 4 ||
        pread(fd, &size, 4, offset) != 4 ||
        pwrite(fd, &orig, 4, offset) != 4) {
        error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    if (vfio_opt_rom_in_denylist(vdev)) {
        if (dev->opts && qdict_haskey(dev->opts, "rombar")) {
            warn_report("Device at %s is known to cause system instability"
                        " issues during option rom execution",
                        vdev->vbasedev.name);
            error_printf("Proceeding anyway since user specified"
                         " non zero value for rombar\n");
        } else {
            warn_report("Rom loading for device at %s has been disabled"
                        " due to system instability issues",
                        vdev->vbasedev.name);
            error_printf("Specify rombar=1 or romfile to force\n");
            return;
        }
    }

    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);

    name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);
    g_free(name);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->rom_read_failed = false;
}

void vfio_vga_write(void *opaque, hwaddr addr,
                    uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    trace_vfio_vga_write(region->offset + addr, data, size);
}

uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_vga_read(region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Expand the memory region of a sub-page (size < PAGE_SIZE) MMIO BAR to page
 * size if the BAR occupies an exclusive page on the host, so that we can map
 * this BAR into the guest.  The sub-page BAR may not occupy an exclusive page
 * in the guest, however, so the priority of the expanded memory region is set
 * to zero in case it overlaps with BARs that share the same page with the
 * sub-page BAR in the guest.  We also restore the size of this sub-page BAR
 * when its base address is changed in the guest and is no longer page
 * aligned.
 */
static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    VFIORegion *region = &vdev->bars[bar].region;
    MemoryRegion *mmap_mr, *region_mr, *base_mr;
    PCIIORegion *r;
    pcibus_t bar_addr;
    uint64_t size = region->size;

    /* Make sure that the whole region is allowed to be mmapped */
    if (region->nr_mmaps != 1 || !region->mmaps[0].mmap ||
        region->mmaps[0].size != region->size) {
        return;
    }

    r = &pdev->io_regions[bar];
    bar_addr = r->addr;
    base_mr = vdev->bars[bar].mr;
    region_mr = region->mem;
    mmap_mr = &region->mmaps[0].mem;

    /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
    if (bar_addr != PCI_BAR_UNMAPPED &&
        !(bar_addr & ~qemu_real_host_page_mask())) {
        size = qemu_real_host_page_size();
    }

    memory_region_transaction_begin();

    if (vdev->bars[bar].size < size) {
        memory_region_set_size(base_mr, size);
    }
    memory_region_set_size(region_mr, size);
    memory_region_set_size(mmap_mr, size);
    if (size != vdev->bars[bar].size && memory_region_is_mapped(base_mr)) {
        memory_region_del_subregion(r->address_space, base_mr);
        memory_region_add_subregion_overlap(r->address_space,
                                            bar_addr, base_mr, 0);
    }

    memory_region_transaction_commit();
}

/*
 * PCI config space
 */
uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->vbasedev.fd, &phys_val, len,
                    vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%s, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->vbasedev.name, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);

    return val;
}

void vfio_pci_write_config(PCIDevice *pdev,
                           uint32_t addr, uint32_t val, int len)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    uint32_t val_le = cpu_to_le32(val);

    trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
                != len) {
        error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->vbasedev.name, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_msi_enable(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_msi_disable(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
        ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_msix_enable(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_msix_disable(vdev);
        }
    } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) ||
        range_covers_byte(addr, len, PCI_COMMAND)) {
        pcibus_t old_addr[PCI_NUM_REGIONS - 1];
        int bar;

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            old_addr[bar] = pdev->io_regions[bar].addr;
        }

        pci_default_write_config(pdev, addr, val, len);

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            if (old_addr[bar] != pdev->io_regions[bar].addr &&
                vdev->bars[bar].region.size > 0 &&
                vdev->bars[bar].region.size < qemu_real_host_page_size()) {
                vfio_sub_page_bar_update_mapping(pdev, bar);
            }
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}

/*
 * Interrupt setup
 */
static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
{
    /*
     * More complicated than it looks.  Disabling MSI/X transitions the
     * device to INTx mode (if supported).  Therefore we need to first
     * disable MSI/X and then cleanup by disabling INTx.
     */
    if (vdev->interrupt == VFIO_INT_MSIX) {
        vfio_msix_disable(vdev);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        vfio_msi_disable(vdev);
    }

    if (vdev->interrupt == VFIO_INT_INTx) {
        vfio_intx_disable(vdev);
    }
}

static bool vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;
    Error *err = NULL;

    if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS");
        return false;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);

    trace_vfio_msi_setup(vdev->vbasedev.name, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return true;
        }
        error_propagate_prepend(errp, err, "msi_init failed: ");
        return false;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return true;
}

static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
{
    off_t start, end;
    VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;

    /*
     * If the host driver allows mapping of the MSI-X data, we are going to
     * map the entire BAR and emulate the MSI-X table on top of that.
     */
    if (vfio_has_region_cap(&vdev->vbasedev, region->nr,
                            VFIO_REGION_INFO_CAP_MSIX_MAPPABLE)) {
        return;
    }

    /*
     * We expect to find a single mmap covering the whole BAR, anything else
     * means it's either unsupported or already setup.
     */
    if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
        region->size != region->mmaps[0].size) {
        return;
    }

    /* MSI-X table start and end aligned to host page size */
    start = vdev->msix->table_offset & qemu_real_host_page_mask();
    end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
                               (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

    /*
     * Does the MSI-X table cover the beginning of the BAR?  The whole BAR?
     * NB - Host page size is necessarily a power of two and so is the PCI
     * BAR (not counting EA yet), therefore if we have host page aligned
     * @start and @end, then any remainder of the BAR before or after those
     * must be at least host page sized and therefore mmap'able.
     */
    if (!start) {
        if (end >= region->size) {
            region->nr_mmaps = 0;
            g_free(region->mmaps);
            region->mmaps = NULL;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                                  vdev->msix->table_bar, 0, 0);
        } else {
            region->mmaps[0].offset = end;
            region->mmaps[0].size = region->size - end;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);
        }

    /* Maybe it's aligned at the end of the BAR */
    } else if (end >= region->size) {
        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

    /* Otherwise it must split the BAR */
    } else {
        region->nr_mmaps = 2;
        region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);

        memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));

        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

        region->mmaps[1].offset = end;
        region->mmaps[1].size = region->size - end;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[1].offset,
                              region->mmaps[1].offset + region->mmaps[1].size);
    }
}

static bool vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
{
    int target_bar = -1;
    size_t msix_sz;

    if (!vdev->msix || vdev->msix_relo == OFF_AUTO_PCIBAR_OFF) {
        return true;
    }

    /* The actual minimum size of MSI-X structures */
    msix_sz = (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE) +
              (QEMU_ALIGN_UP(vdev->msix->entries, 64) / 8);
    /* Round up to host pages, we don't want to share a page */
    msix_sz = REAL_HOST_PAGE_ALIGN(msix_sz);
    /* PCI BARs must be a power of 2 */
    msix_sz = pow2ceil(msix_sz);

    if (vdev->msix_relo == OFF_AUTO_PCIBAR_AUTO) {
        /*
         * TODO: Lookup table for known devices.
         *
         * Logically we might use an algorithm here to select the BAR adding
         * the least additional MMIO space, but we cannot programmatically
         * predict the driver dependency on BAR ordering or sizing, therefore
         * 'auto' becomes a lookup for combinations reported to work.
         */
        if (target_bar < 0) {
            error_setg(errp, "No automatic MSI-X relocation available for "
                       "device %04x:%04x", vdev->vendor_id, vdev->device_id);
            return false;
        }
    } else {
        target_bar = (int)(vdev->msix_relo - OFF_AUTO_PCIBAR_BAR0);
    }

    /* I/O port BARs cannot host MSI-X structures */
    if (vdev->bars[target_bar].ioport) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "I/O port BAR", target_bar);
        return false;
    }

    /* Cannot use a BAR in the "shadow" of a 64-bit BAR */
    if (!vdev->bars[target_bar].size &&
         target_bar > 0 && vdev->bars[target_bar - 1].mem64) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "consumed by 64-bit BAR %d", target_bar, target_bar - 1);
        return false;
    }

    /* 2GB max size for 32-bit BARs, cannot double if already > 1G */
    if (vdev->bars[target_bar].size > 1 * GiB &&
        !vdev->bars[target_bar].mem64) {
        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
                   "no space to extend 32-bit BAR", target_bar);
        return false;
    }

    /*
     * If adding a new BAR, test if we can make it 64bit.  We make it
     * prefetchable since QEMU MSI-X emulation has no read side effects
     * and doing so makes mapping more flexible.
     */
    if (!vdev->bars[target_bar].size) {
        if (target_bar < (PCI_ROM_SLOT - 1) &&
            !vdev->bars[target_bar + 1].size) {
            vdev->bars[target_bar].mem64 = true;
            vdev->bars[target_bar].type = PCI_BASE_ADDRESS_MEM_TYPE_64;
        }
        vdev->bars[target_bar].type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
        vdev->bars[target_bar].size = msix_sz;
        vdev->msix->table_offset = 0;
    } else {
        vdev->bars[target_bar].size = MAX(vdev->bars[target_bar].size * 2,
                                          msix_sz * 2);
        /*
         * Due to above size calc, MSI-X always starts halfway into the BAR,
         * which will always be a separate host page.
         */
        vdev->msix->table_offset = vdev->bars[target_bar].size / 2;
    }

    vdev->msix->table_bar = target_bar;
    vdev->msix->pba_bar = target_bar;
    /* Requires 8-byte alignment, but PCI_MSIX_ENTRY_SIZE guarantees that */
    vdev->msix->pba_offset = vdev->msix->table_offset +
                                  (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE);

    trace_vfio_msix_relo(vdev->vbasedev.name,
                         vdev->msix->table_bar, vdev->msix->table_offset);
    return true;
}

/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain.  In order to setup MSI-X we need a
 * MemoryRegion for the BAR.  In order to setup the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives.  So we
 * unfortunately split MSI-X setup across two functions.
 */
static bool vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;
    int ret, fd = vdev->vbasedev.fd;
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
                                      .index = VFIO_PCI_MSIX_IRQ_INDEX };
    VFIOMSIXInfo *msix;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return true;
    }

    if (pread(fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS");
        return false;
    }

    if (pread(fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE");
        return false;
    }

    if (pread(fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX PBA");
        return false;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    msix = g_malloc0(sizeof(*msix));
    msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get MSI-X irq info");
        g_free(msix);
        return false;
    }

    msix->noresize = !!(irq_info.flags & VFIO_IRQ_INFO_NORESIZE);

    /*
     * Test the size of the pba_offset variable and catch if it extends outside
     * of the specified BAR.  If it is the case, we need to apply a hardware
     * specific quirk if the device is known or we have a broken configuration.
     */
    if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
        /*
         * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
         * adapters.  The T5 hardware returns an incorrect value of 0x8000 for
         * the VF PBA offset while the BAR itself is only 8k.  The correct value
         * is 0x1000, so we hard code that here.
         */
        if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
            (vdev->device_id & 0xff00) == 0x5800) {
            msix->pba_offset = 0x1000;
        /*
         * BAIDU KUNLUN Virtual Function devices for KUNLUN AI processor
         * return an incorrect value of 0x460000 for the VF PBA offset while
         * the BAR itself is only 0x10000.  The correct value is 0xb400.
         */
        } else if (vfio_pci_is(vdev, PCI_VENDOR_ID_BAIDU,
                               PCI_DEVICE_ID_KUNLUN_VF)) {
            msix->pba_offset = 0xb400;
        } else if (vdev->msix_relo == OFF_AUTO_PCIBAR_OFF) {
            error_setg(errp, "hardware reports invalid configuration, "
                       "MSIX PBA outside of specified BAR");
            g_free(msix);
            return false;
        }
    }

    trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
                                msix->table_offset, msix->entries,
                                msix->noresize);
    vdev->msix = msix;

    vfio_pci_fixup_msix_region(vdev);

    return vfio_pci_relocate_msix(vdev, errp);
}

static bool vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    int ret;
    Error *err = NULL;

    vdev->msix->pending = g_new0(unsigned long,
                                 BITS_TO_LONGS(vdev->msix->entries));
    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    vdev->bars[vdev->msix->table_bar].mr,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    vdev->bars[vdev->msix->pba_bar].mr,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos,
                    &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            warn_report_err(err);
            return true;
        }

        error_propagate(errp, err);
        return false;
    }

    /*
     * The PCI spec suggests that devices provide additional alignment for
     * MSI-X structures and avoid overlapping non-MSI-X related registers.
     * For an assigned device, this hopefully means that emulation of MSI-X
     * structures does not affect the performance of the device.  If devices
     * fail to provide that alignment, a significant performance penalty may
     * result, for instance Mellanox MT27500 VFs:
     * http://www.spinics.net/lists/kvm/msg125881.html
     *
     * The PBA is simply not that important for such a serious regression and
     * most drivers do not appear to look at it.  The solution for this is to
     * disable the PBA MemoryRegion unless it's being used.  We disable it
     * here and only enable it if a masked vector fires through QEMU.  As the
     * vector-use notifier is called, which occurs on unmask, we test whether
     * PBA emulation is needed and again disable if not.
     */
    memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);

    /*
     * The emulated machine may provide a paravirt interface for MSIX setup
     * so it is not strictly necessary to emulate MSIX here.  This becomes
     * helpful when frequently accessed MMIO registers are located in
     * subpages adjacent to the MSIX table but the MSIX data containing page
     * cannot be mapped because of a host page size bigger than the MSIX table
     * alignment.
     */
    if (object_property_get_bool(OBJECT(qdev_get_machine()),
                                 "vfio-no-msix-emulation", NULL)) {
        memory_region_set_enabled(&vdev->pdev.msix_table_mmio, false);
    }

    return true;
}

static void vfio_teardown_msi(VFIOPCIDevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev,
                    vdev->bars[vdev->msix->table_bar].mr,
                    vdev->bars[vdev->msix->pba_bar].mr);
        g_free(vdev->msix->pending);
    }
}

/*
 * Resource setup
 */
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
    }
}

*vdev
, int nr
)
1728 VFIOBAR
*bar
= &vdev
->bars
[nr
];
1733 /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
1734 if (!bar
->region
.size
) {
1738 /* Determine what type of BAR this is for registration */
1739 ret
= pread(vdev
->vbasedev
.fd
, &pci_bar
, sizeof(pci_bar
),
1740 vdev
->config_offset
+ PCI_BASE_ADDRESS_0
+ (4 * nr
));
1741 if (ret
!= sizeof(pci_bar
)) {
1742 error_report("vfio: Failed to read BAR %d (%m)", nr
);
1746 pci_bar
= le32_to_cpu(pci_bar
);
1747 bar
->ioport
= (pci_bar
& PCI_BASE_ADDRESS_SPACE_IO
);
1748 bar
->mem64
= bar
->ioport
? 0 : (pci_bar
& PCI_BASE_ADDRESS_MEM_TYPE_64
);
1749 bar
->type
= pci_bar
& (bar
->ioport
? ~PCI_BASE_ADDRESS_IO_MASK
:
1750 ~PCI_BASE_ADDRESS_MEM_MASK
);
1751 bar
->size
= bar
->region
.size
;
1754 static void vfio_bars_prepare(VFIOPCIDevice
*vdev
)
1758 for (i
= 0; i
< PCI_ROM_SLOT
; i
++) {
1759 vfio_bar_prepare(vdev
, i
);
1763 static void vfio_bar_register(VFIOPCIDevice
*vdev
, int nr
)
1765 VFIOBAR
*bar
= &vdev
->bars
[nr
];
1772 bar
->mr
= g_new0(MemoryRegion
, 1);
1773 name
= g_strdup_printf("%s base BAR %d", vdev
->vbasedev
.name
, nr
);
1774 memory_region_init_io(bar
->mr
, OBJECT(vdev
), NULL
, NULL
, name
, bar
->size
);
1777 if (bar
->region
.size
) {
1778 memory_region_add_subregion(bar
->mr
, 0, bar
->region
.mem
);
1780 if (vfio_region_mmap(&bar
->region
)) {
1781 error_report("Failed to mmap %s BAR %d. Performance may be slow",
1782 vdev
->vbasedev
.name
, nr
);
1786 pci_register_bar(&vdev
->pdev
, nr
, bar
->type
, bar
->mr
);
static void vfio_bars_register(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_register(vdev, i);
    }
}

static void vfio_bars_exit(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        vfio_bar_quirk_exit(vdev, i);
        vfio_region_exit(&bar->region);
        if (bar->region.size) {
            memory_region_del_subregion(bar->mr, bar->region.mem);
        }
    }

    if (vdev->vga) {
        pci_unregister_vga(&vdev->pdev);
        vfio_vga_quirk_exit(vdev);
    }
}

static void vfio_bars_finalize(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        VFIOBAR *bar = &vdev->bars[i];

        vfio_bar_quirk_finalize(vdev, i);
        vfio_region_finalize(&bar->region);
        if (bar->mr) {
            assert(bar->size);
            object_unparent(OBJECT(bar->mr));
            g_free(bar->mr);
            bar->mr = NULL;
        }
    }

    if (vdev->vga) {
        vfio_vga_quirk_finalize(vdev);
        for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
            object_unparent(OBJECT(&vdev->vga->region[i].mem));
        }
        g_free(vdev->vga);
    }
}

/*
 * General setup
 */
static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp;
    uint16_t next = PCI_CONFIG_SPACE_SIZE;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
{
    uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;

    for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
         tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

, uint16_t val
, uint16_t mask
)
1879 pci_set_word(buf
, (pci_get_word(buf
) & ~mask
) | val
);
1882 static void vfio_add_emulated_word(VFIOPCIDevice
*vdev
, int pos
,
1883 uint16_t val
, uint16_t mask
)
1885 vfio_set_word_bits(vdev
->pdev
.config
+ pos
, val
, mask
);
1886 vfio_set_word_bits(vdev
->pdev
.wmask
+ pos
, ~mask
, mask
);
1887 vfio_set_word_bits(vdev
->emulated_config_bits
+ pos
, mask
, mask
);
1890 static void vfio_set_long_bits(uint8_t *buf
, uint32_t val
, uint32_t mask
)
1892 pci_set_long(buf
, (pci_get_long(buf
) & ~mask
) | val
);
1895 static void vfio_add_emulated_long(VFIOPCIDevice
*vdev
, int pos
,
1896 uint32_t val
, uint32_t mask
)
1898 vfio_set_long_bits(vdev
->pdev
.config
+ pos
, val
, mask
);
1899 vfio_set_long_bits(vdev
->pdev
.wmask
+ pos
, ~mask
, mask
);
1900 vfio_set_long_bits(vdev
->emulated_config_bits
+ pos
, mask
, mask
);
static void vfio_pci_enable_rp_atomics(VFIOPCIDevice *vdev)
{
    struct vfio_device_info_cap_pci_atomic_comp *cap;
    g_autofree struct vfio_device_info *info = NULL;
    PCIBus *bus = pci_get_bus(&vdev->pdev);
    PCIDevice *parent = bus->parent_dev;
    struct vfio_info_cap_header *hdr;
    uint32_t mask = 0;
    uint8_t *pos;

    /*
     * PCIe Atomic Ops completer support is only added automatically for single
     * function devices downstream of a root port supporting DEVCAP2.  Support
     * is added during realize and, if added, removed during device exit.  The
     * single function requirement avoids conflicting requirements should a
     * slot be composed of multiple devices with differing capabilities.
     */
    if (pci_bus_is_root(bus) || !parent || !parent->exp.exp_cap ||
        pcie_cap_get_type(parent) != PCI_EXP_TYPE_ROOT_PORT ||
        pcie_cap_get_version(parent) != PCI_EXP_FLAGS_VER2 ||
        vdev->pdev.devfn ||
        vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        return;
    }

    pos = parent->config + parent->exp.exp_cap + PCI_EXP_DEVCAP2;

    /* Abort if there's already an Atomic Ops configuration on the root port */
    if (pci_get_long(pos) & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
                             PCI_EXP_DEVCAP2_ATOMIC_COMP64 |
                             PCI_EXP_DEVCAP2_ATOMIC_COMP128)) {
        return;
    }

    info = vfio_get_device_info(vdev->vbasedev.fd);
    if (!info) {
        return;
    }

    hdr = vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP);
    if (!hdr) {
        return;
    }

    cap = (void *)hdr;
    if (cap->flags & VFIO_PCI_ATOMIC_COMP32) {
        mask |= PCI_EXP_DEVCAP2_ATOMIC_COMP32;
    }
    if (cap->flags & VFIO_PCI_ATOMIC_COMP64) {
        mask |= PCI_EXP_DEVCAP2_ATOMIC_COMP64;
    }
    if (cap->flags & VFIO_PCI_ATOMIC_COMP128) {
        mask |= PCI_EXP_DEVCAP2_ATOMIC_COMP128;
    }

    if (!mask) {
        return;
    }

    pci_long_test_and_set_mask(pos, mask);
    vdev->clear_parent_atomics_on_exit = true;
}

static void vfio_pci_disable_rp_atomics(VFIOPCIDevice *vdev)
{
    if (vdev->clear_parent_atomics_on_exit) {
        PCIDevice *parent = pci_get_bus(&vdev->pdev)->parent_dev;
        uint8_t *pos = parent->config + parent->exp.exp_cap + PCI_EXP_DEVCAP2;

        pci_long_test_and_clear_mask(pos, PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
                                          PCI_EXP_DEVCAP2_ATOMIC_COMP64 |
                                          PCI_EXP_DEVCAP2_ATOMIC_COMP128);
    }
}

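/*
 * Express capability setup: hide or rewrite the PCIe capability so the
 * exposed device type is valid for the bus it lands on in the VM
 * (conventional PCI, root complex, or express bus).
 */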

static bool vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
                                Error **errp)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {

        error_setg(errp, "assignment of PCIe type 0x%x "
                   "devices is not currently supported", type);
        return false;
    }

    if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) {
        PCIBus *bus = pci_get_bus(&vdev->pdev);
        PCIDevice *bridge;

        /*
         * Traditionally PCI device assignment exposes the PCIe capability
         * as-is on non-express buses. The reason being that some drivers
         * simply assume that it's there, for example tg3. However when
         * we're running on a native PCIe machine type, like Q35, we need
         * to hide the PCIe capability. The reason for this is twofold;
         * first Windows guests get a Code 10 error when the PCIe capability
         * is exposed in this configuration. Therefore express devices won't
         * work at all unless they're attached to express buses in the VM.
         * Second, a native PCIe machine introduces the possibility of fine
         * granularity IOMMUs supporting both translation and isolation.
         * Guest code to discover the IOMMU visibility of a device, such as
         * IOMMU grouping code on Linux, is very aware of device types and
         * valid transitions between bus types. An express device on a non-
         * express bus is not a valid combination on bare metal systems.
         *
         * Drivers that require a PCIe capability to make the device
         * functional are simply going to need to have their devices placed
         * on a PCIe bus in the VM.
         */
        while (!pci_bus_is_root(bus)) {
            bridge = pci_bridge_get_device(bus);
            bus = pci_get_bus(bridge);
        }

        if (pci_bus_is_express(bus)) {
            return true;
        }

    } else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control go away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control go away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }

        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex. Windows
             * seems to be happier with devices if we skip the capability.
             */
            return true;
        }

    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                           QEMU_PCI_EXP_LNKCAP_MLW(QEMU_PCI_EXP_LNK_X1) |
                           QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT), ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }

        vfio_pci_enable_rp_atomics(vdev);
    }

    /*
     * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0
     * (Niantic errata #35) causing Windows to error with a Code 10 for the
     * device on Q35. Fixup any such devices to report version 1. If we
     * were to remove the capability entirely the guest would lose extended
     * config space.
     */
    if ((flags & PCI_EXP_FLAGS_VERS) == 0) {
        vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                               1, PCI_EXP_FLAGS_VERS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size,
                             errp);
    if (pos < 0) {
        return false;
    }

    vdev->pdev.exp.exp_cap = pos;

    return true;
}
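
/*
 * Worked example for the type handling above (hypothetical register value):
 * PCI_EXP_FLAGS_TYPE is bits 7:4 of the PCIe Capabilities register, so a
 * flags value of 0x0092 decodes as capability version 2 with device type 9
 * (PCI_EXP_TYPE_RC_END). For such a device plugged on an express bus below
 * the Root Complex, the type field is rewritten to 0 (PCI_EXP_TYPE_ENDPOINT)
 * and a 2.5GT/s x1 link is fabricated, since RC Integrated Endpoints carry
 * no LNK/LNK2 registers of their own.
 */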

static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);

    if (cap & PCI_EXP_DEVCAP_FLR) {
        trace_vfio_check_pcie_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);

    if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
        trace_vfio_check_pm_reset(vdev->vbasedev.name);
        vdev->has_pm_reset = true;
    }
}

static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
{
    uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);

    if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
        trace_vfio_check_af_flr(vdev->vbasedev.name);
        vdev->has_flr = true;
    }
}

static bool vfio_add_vendor_specific_cap(VFIOPCIDevice *vdev, int pos,
                                         uint8_t size, Error **errp)
{
    PCIDevice *pdev = &vdev->pdev;

    pos = pci_add_capability(pdev, PCI_CAP_ID_VNDR, pos, size, errp);
    if (pos < 0) {
        return false;
    }

    /*
     * Exempt config space check for Vendor Specific Information during
     * restore/load.
     * Config space check is still enforced for 3 byte VSC header.
     */
    if (vdev->skip_vsc_check && size > 3) {
        memset(pdev->cmask + pos + 3, 0, size - 3);
    }

    return true;
}
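
/*
 * Note on the memset above: pdev->cmask marks which config bytes are
 * validated against the destination device when a migration stream is
 * loaded, so zeroing cmask for bytes pos+3 .. pos+size-1 exempts the
 * vendor payload from that comparison while the 3-byte VSC header at
 * pos remains checked.
 */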

static bool vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
{
    PCIDevice *pdev = &vdev->pdev;
    uint8_t cap_id, next, size;
    bool ret;

    cap_id = pdev->config[pos];
    next = pdev->config[pos + PCI_CAP_LIST_NEXT];

    /*
     * If it becomes important to configure capabilities to their actual
     * size, use this as the default when it's something we don't recognize.
     * Since QEMU doesn't actually handle many of the config accesses,
     * exact size doesn't seem worthwhile.
     */
    size = vfio_std_cap_max_size(pdev, pos);

    /*
     * pci_add_capability always inserts the new capability at the head
     * of the chain. Therefore to end up with a chain that matches the
     * physical device, we insert from the end by making this recursive.
     * This is also why we pre-calculate size above as cached config space
     * will be changed as we unwind the stack.
     */
    if (next) {
        if (!vfio_add_std_cap(vdev, next, errp)) {
            return false;
        }
    } else {
        /* Begin the rebuild, use QEMU emulated list bits */
        pdev->config[PCI_CAPABILITY_LIST] = 0;
        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;

        if (!vfio_add_virt_caps(vdev, errp)) {
            return false;
        }
    }

    /* Scale down size, esp in case virt caps were added above */
    size = MIN(size, vfio_std_cap_max_size(pdev, pos));

    /* Use emulated next pointer to allow dropping caps */
    pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);

    switch (cap_id) {
    case PCI_CAP_ID_MSI:
        ret = vfio_msi_setup(vdev, pos, errp);
        break;
    case PCI_CAP_ID_EXP:
        vfio_check_pcie_flr(vdev, pos);
        ret = vfio_setup_pcie_cap(vdev, pos, size, errp);
        break;
    case PCI_CAP_ID_MSIX:
        ret = vfio_msix_setup(vdev, pos, errp);
        break;
    case PCI_CAP_ID_PM:
        vfio_check_pm_reset(vdev, pos);
        vdev->pm_cap = pos;
        ret = pci_add_capability(pdev, cap_id, pos, size, errp) >= 0;
        break;
    case PCI_CAP_ID_AF:
        vfio_check_af_flr(vdev, pos);
        ret = pci_add_capability(pdev, cap_id, pos, size, errp) >= 0;
        break;
    case PCI_CAP_ID_VNDR:
        ret = vfio_add_vendor_specific_cap(vdev, pos, size, errp);
        break;
    default:
        ret = pci_add_capability(pdev, cap_id, pos, size, errp) >= 0;
        break;
    }

    if (!ret) {
        error_prepend(errp,
                      "failed to add PCI capability 0x%x[0x%x]@0x%x: ",
                      cap_id, size, pos);
    }

    return ret;
}

static int vfio_setup_rebar_ecap(VFIOPCIDevice *vdev, uint16_t pos)
{
    uint32_t ctrl;
    int i, nbar;

    ctrl = pci_get_long(vdev->pdev.config + pos + PCI_REBAR_CTRL);
    nbar = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT;

    for (i = 0; i < nbar; i++) {
        uint32_t cap;
        int size;

        ctrl = pci_get_long(vdev->pdev.config + pos + PCI_REBAR_CTRL + (i * 8));
        size = (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;

        /* The cap register reports sizes 1MB to 128TB, with 4 reserved bits */
        cap = size <= 27 ? 1U << (size + 4) : 0;

        /*
         * The PCIe spec (v6.0.1, 7.8.6) requires HW to support at least one
         * size in the range 1MB to 512GB. We intend to mask all sizes except
         * the one currently enabled in the size field, therefore if it's
         * outside the range, hide the whole capability as this virtualization
         * trick won't work. If >512GB resizable BARs start to appear, we
         * might need an opt-in or reservation scheme in the kernel.
         */
        if (!(cap & PCI_REBAR_CAP_SIZES)) {
            return -EINVAL;
        }

        /* Hide all sizes reported in the ctrl reg per above requirement. */
        ctrl &= (PCI_REBAR_CTRL_BAR_SIZE |
                 PCI_REBAR_CTRL_NBAR_MASK |
                 PCI_REBAR_CTRL_BAR_IDX);

        /*
         * The BAR size field is RW, however we've mangled the capability
         * register such that we only report a single size, ie. the current
         * BAR size. A write of an unsupported value is undefined, therefore
         * the register field is essentially RO.
         */
        vfio_add_emulated_long(vdev, pos + PCI_REBAR_CAP + (i * 8), cap, ~0);
        vfio_add_emulated_long(vdev, pos + PCI_REBAR_CTRL + (i * 8), ctrl, ~0);
    }

    return 0;
}
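
/*
 * Worked example of the size math above: the ctrl size field encodes
 * 2^(size + 20) bytes, so size 0 is 1MB and size 8 is 256MB. The computed
 * 'cap' sets the matching bit of the capability register, where bit 4
 * represents 1MB; size 8 therefore yields 1U << 12. PCI_REBAR_CAP_SIZES
 * spans the 1MB..512GB bits, so a currently programmed size above 512GB
 * fails the test and hides the whole capability.
 */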

static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint32_t header;
    uint16_t cap_id, next, size;
    uint8_t cap_ver;
    uint8_t *config;

    /* Only add extended caps if we have them and the guest can see them */
    if (!pci_is_express(pdev) || !pci_bus_is_express(pci_get_bus(pdev)) ||
        !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
        return;
    }

    /*
     * pcie_add_capability always inserts the new capability at the tail
     * of the chain. Therefore to end up with a chain that matches the
     * physical device, we cache the config space to avoid overwriting
     * the original config space when we parse the extended capabilities.
     */
    config = g_memdup(pdev->config, vdev->config_size);

    /*
     * Extended capabilities are chained with each pointing to the next, so we
     * can drop anything other than the head of the chain simply by modifying
     * the previous next pointer. Seed the head of the chain here such that
     * we can simply skip any capabilities we want to drop below, regardless
     * of their position in the chain. If this stub capability still exists
     * after we add the capabilities we want to expose, update the capability
     * ID to zero. Note that we cannot seed with the capability header being
     * zero as this conflicts with definition of an absent capability chain
     * and prevents capabilities beyond the head of the list from being added.
     * By replacing the dummy capability ID with zero after walking the device
     * chain, we also transparently mark extended capabilities as absent if
     * no capabilities were added. Note that the PCIe spec defines an absence
     * of extended capabilities to be determined by a value of zero for the
     * capability ID, version, AND next pointer. A non-zero next pointer
     * should be sufficient to indicate additional capabilities are present,
     * which will occur if we call pcie_add_capability() below. The entire
     * first dword is emulated to support this.
     *
     * NB. The kernel side does similar masking, so be prepared that our
     * view of the device may also contain a capability ID zero in the head
     * of the chain. Skip it for the same reason that we cannot seed the
     * chain with a zero capability.
     */
    pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
                 PCI_EXT_CAP(0xFFFF, 0, 0));
    pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
    pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);

    for (next = PCI_CONFIG_SPACE_SIZE; next;
         next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
        header = pci_get_long(config + next);
        cap_id = PCI_EXT_CAP_ID(header);
        cap_ver = PCI_EXT_CAP_VER(header);

        /*
         * If it becomes important to configure extended capabilities to their
         * actual size, use this as the default when it's something we don't
         * recognize. Since QEMU doesn't actually handle many of the config
         * accesses, exact size doesn't seem worthwhile.
         */
        size = vfio_ext_cap_max_size(config, next);

        /* Use emulated next pointer to allow dropping extended caps */
        pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
                                   PCI_EXT_CAP_NEXT_MASK);

        switch (cap_id) {
        case 0: /* kernel masked capability */
        case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
        case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
            trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
            break;
        case PCI_EXT_CAP_ID_REBAR:
            if (!vfio_setup_rebar_ecap(vdev, next)) {
                pcie_add_capability(pdev, cap_id, cap_ver, next, size);
            }
            break;
        default:
            pcie_add_capability(pdev, cap_id, cap_ver, next, size);
        }
    }

    /* Cleanup chain head ID if necessary */
    if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
        pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
    }

    g_free(config);
}
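
/*
 * Worked example for the chain seeding in vfio_add_ext_cap() (hypothetical
 * layout): suppose the physical chain is 0x100 (SR-IOV) -> 0x140 (ACS).
 * The stub seeds the guest view with ID 0xFFFF at 0x100. SR-IOV is dropped
 * in the switch, so nothing is added for it; ACS is kept, so
 * pcie_add_capability() links 0x140 from the stub. The final fixup then
 * rewrites the stub's 0xFFFF to 0, leaving a chain the guest parses as
 * 0x100 (null capability) -> 0x140 (ACS).
 */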

static bool vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
{
    PCIDevice *pdev = &vdev->pdev;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
        !pdev->config[PCI_CAPABILITY_LIST]) {
        return true; /* Nothing to add */
    }

    if (!vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp)) {
        return false;
    }

    vfio_add_ext_cap(vdev);
    return true;
}

void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    uint16_t cmd;

    vfio_disable_interrupts(vdev);

    /* Make sure the device is in D0 */
    if (vdev->pm_cap) {
        uint16_t pmcsr;
        uint8_t state;

        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (state) {
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
            /* vfio handles the necessary delay here */
            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
            if (state) {
                error_report("vfio: Unable to power on device, stuck in D%d",
                             state);
            }
        }
    }

    /*
     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
     * Also put INTx Disable in known state.
     */
    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
             PCI_COMMAND_INTX_DISABLE);
    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
}
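
/*
 * Worked example for the D-state handling above: PCI_PM_CTRL_STATE_MASK
 * covers bits 1:0 of the PM control/status register, so a pmcsr of 0x0003
 * means the device is in D3hot. Clearing those bits and writing the
 * register back requests D0; the second read verifies the transition,
 * since a device may legally take time to wake, or fail to.
 */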

void vfio_pci_post_reset(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int nr;

    if (!vfio_intx_enable(vdev, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }

    for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
        off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr);
        uint32_t val = 0;
        uint32_t len = sizeof(val);

        if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) {
            error_report("%s(%s) reset bar %d failed: %m", __func__,
                         vdev->vbasedev.name, nr);
        }
    }

    vfio_quirk_reset(vdev);
}

bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
{
    char tmp[13];

    sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
            addr->bus, addr->slot, addr->function);

    return (strcmp(tmp, name) == 0);
}
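
/*
 * Worked example: an addr of { .domain = 0x0000, .bus = 0x06, .slot = 0x1e,
 * .function = 0x1 } formats as "0000:06:1e.1", the same
 * segment:bus:device.function form used for VFIO device names, so a plain
 * strcmp() is sufficient.
 */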

int vfio_pci_get_pci_hot_reset_info(VFIOPCIDevice *vdev,
                                    struct vfio_pci_hot_reset_info **info_p)
{
    struct vfio_pci_hot_reset_info *info;
    int ret, count;

    assert(info_p && !*info_p);

    info = g_malloc0(sizeof(*info));
    info->argsz = sizeof(*info);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret && errno != ENOSPC) {
        ret = -errno;
        g_free(info);
        if (!vdev->has_pm_reset) {
            error_report("vfio: Cannot reset device %s, "
                         "no available reset mechanism.", vdev->vbasedev.name);
        }
        return ret;
    }

    count = info->count;
    info = g_realloc(info, sizeof(*info) + (count * sizeof(info->devices[0])));
    info->argsz = sizeof(*info) + (count * sizeof(info->devices[0]));

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
    if (ret) {
        ret = -errno;
        g_free(info);
        error_report("vfio: hot reset info failed: %m");
        return ret;
    }

    *info_p = info;
    return 0;
}

static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    const VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(vbasedev->bcontainer);

    return vioc->pci_hot_reset(vbasedev, single);
}

/*
 * We want to differentiate hot reset of multiple in-use devices vs hot reset
 * of a single in-use device. VFIO_DEVICE_RESET will already handle the case
 * of doing hot resets when there is only a single device per bus. The in-use
 * here refers to how many VFIODevices are affected. A hot reset that affects
 * multiple devices, but only a single in-use device, means that we can call
 * it from our bus ->reset() callback since the extent is effectively a single
 * device. This allows us to make use of it in the hotplug path. When there
 * are multiple in-use devices, we can only trigger the hot reset during a
 * system reset and thus from our reset handler. We separate _one vs _multi
 * here so that we don't overlap and do a double reset on the system reset
 * path where both our reset handler and ->reset() callback are used. Calling
 * _one() will only do a hot reset for the one in-use device case, calling
 * _multi() will do nothing if a _one() would have been sufficient.
 */
static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
{
    return vfio_pci_hot_reset(vdev, true);
}

static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    return vfio_pci_hot_reset(vdev, false);
}

static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
        vbasedev->needs_reset = true;
    }
}

static Object *vfio_pci_get_object(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    return OBJECT(vdev);
}

static bool vfio_msix_present(void *opaque, int version_id)
{
    PCIDevice *pdev = opaque;

    return msix_present(pdev);
}

static bool vfio_display_migration_needed(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    /*
     * We need to migrate the VFIODisplay object if ramfb *migration* was
     * explicitly requested (in which case we enforced both ramfb=on and
     * display=on), or ramfb migration was left at the default "auto"
     * setting, and *ramfb* was explicitly requested (in which case we
     * enforced display=on).
     */
    return vdev->ramfb_migrate == ON_OFF_AUTO_ON ||
        (vdev->ramfb_migrate == ON_OFF_AUTO_AUTO && vdev->enable_ramfb);
}

static const VMStateDescription vmstate_vfio_display = {
    .name = "VFIOPCIDevice/VFIODisplay",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vfio_display_migration_needed,
    .fields = (const VMStateField[]){
        VMSTATE_STRUCT_POINTER(dpy, VFIOPCIDevice, vfio_display_vmstate,
                               VFIODisplay),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_vfio_pci_config = {
    .name = "VFIOPCIDevice",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_PCI_DEVICE(pdev, VFIOPCIDevice),
        VMSTATE_MSIX_TEST(pdev, VFIOPCIDevice, vfio_msix_present),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_vfio_display,
        NULL
    }
};

static int vfio_pci_save_config(VFIODevice *vbasedev, QEMUFile *f, Error **errp)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    return vmstate_save_state_with_err(f, &vmstate_vfio_pci_config, vdev, NULL,
                                       errp);
}

static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
    PCIDevice *pdev = &vdev->pdev;
    pcibus_t old_addr[PCI_NUM_REGIONS - 1];
    int bar, ret;

    for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
        old_addr[bar] = pdev->io_regions[bar].addr;
    }

    ret = vmstate_load_state(f, &vmstate_vfio_pci_config, vdev, 1);
    if (ret) {
        return ret;
    }

    vfio_pci_write_config(pdev, PCI_COMMAND,
                          pci_get_word(pdev->config + PCI_COMMAND), 2);

    for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
        /*
         * The address may not be changed in some scenarios
         * (e.g. the VF driver isn't loaded in VM).
         */
        if (old_addr[bar] != pdev->io_regions[bar].addr &&
            vdev->bars[bar].region.size > 0 &&
            vdev->bars[bar].region.size < qemu_real_host_page_size()) {
            vfio_sub_page_bar_update_mapping(pdev, bar);
        }
    }

    if (msi_enabled(pdev)) {
        vfio_msi_enable(vdev);
    } else if (msix_enabled(pdev)) {
        vfio_msix_enable(vdev);
    }

    return ret;
}

static VFIODeviceOps vfio_pci_ops = {
    .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
    .vfio_eoi = vfio_intx_eoi,
    .vfio_get_object = vfio_pci_get_object,
    .vfio_save_config = vfio_pci_save_config,
    .vfio_load_config = vfio_pci_load_config,
};

bool vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    g_autofree struct vfio_region_info *reg_info = NULL;
    int ret;

    ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
    if (ret) {
        error_setg_errno(errp, -ret,
                         "failed getting region info for VGA region index %d",
                         VFIO_PCI_VGA_REGION_INDEX);
        return false;
    }

    if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
        !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
        reg_info->size < 0xbffff + 1) {
        error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx",
                   (unsigned long)reg_info->flags,
                   (unsigned long)reg_info->size);
        return false;
    }

    vdev->vga = g_new0(VFIOVGA, 1);

    vdev->vga->fd_offset = reg_info->offset;
    vdev->vga->fd = vdev->vbasedev.fd;

    vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
    vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_MEM],
                          "vfio-vga-mmio@0xa0000",
                          QEMU_PCI_VGA_MEM_SIZE);

    vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
    vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
                          "vfio-vga-io@0x3b0",
                          QEMU_PCI_VGA_IO_LO_SIZE);

    vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
    vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);

    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
                          OBJECT(vdev), &vfio_vga_ops,
                          &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
                          "vfio-vga-io@0x3c0",
                          QEMU_PCI_VGA_IO_HI_SIZE);

    pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
                     &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
                     &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);

    return true;
}

static bool vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
{
    VFIODevice *vbasedev = &vdev->vbasedev;
    g_autofree struct vfio_region_info *reg_info = NULL;
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
    int i, ret;

    /* Sanity check device */
    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
        error_setg(errp, "this isn't a PCI device");
        return false;
    }

    if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
        error_setg(errp, "unexpected number of io regions %u",
                   vbasedev->num_regions);
        return false;
    }

    if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
        error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs);
        return false;
    }

    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
        char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);

        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
                                &vdev->bars[i].region, i, name);
        g_free(name);

        if (ret) {
            error_setg_errno(errp, -ret, "failed to get region %d info", i);
            return false;
        }

        QLIST_INIT(&vdev->bars[i].quirks);
    }

    ret = vfio_get_region_info(vbasedev,
                               VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
    if (ret) {
        error_setg_errno(errp, -ret, "failed to get config info");
        return false;
    }

    trace_vfio_populate_device_config(vdev->vbasedev.name,
                                      (unsigned long)reg_info->size,
                                      (unsigned long)reg_info->offset,
                                      (unsigned long)reg_info->flags);

    vdev->config_size = reg_info->size;
    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }
    vdev->config_offset = reg_info->offset;

    if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
        if (!vfio_populate_vga(vdev, errp)) {
            error_append_hint(errp, "device does not support "
                              "requested feature x-vga\n");
            return false;
        }
    }

    irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
    if (ret) {
        /* This can fail for an old kernel or legacy PCI dev */
        trace_vfio_populate_device_get_irq_info_failure(strerror(errno));
    } else if (irq_info.count == 1) {
        vdev->pci_aer = true;
    } else {
        warn_report(VFIO_MSG_PREFIX
                    "Could not enable error recovery for the device",
                    vbasedev->name);
    }

    return true;
}

static void vfio_pci_put_device(VFIOPCIDevice *vdev)
{
    vfio_detach_device(&vdev->vbasedev);

    g_free(vdev->vbasedev.name);
    g_free(vdev->msix);
}

static void vfio_err_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
        return;
    }

    /*
     * TBD. Retrieve the error details and decide what action
     * needs to be taken. One of the actions could be to pass
     * the error to the guest and have the guest driver recover
     * from the error. This requires that PCIe capabilities be
     * exposed to the guest. For now, we just terminate the
     * guest to contain the error.
     */

    error_report("%s(%s) Unrecoverable error detected. Please collect any data"
                 " possible and then kill the guest", __func__,
                 vdev->vbasedev.name);

    vm_stop(RUN_STATE_INTERNAL_ERROR);
}

/*
 * Registers error notifier for devices supporting error recovery.
 * If we encounter a failure in this function, we report an error
 * and continue after disabling error recovery support for the
 * device.
 */
static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int32_t fd;

    if (!vdev->pci_aer) {
        return;
    }

    if (event_notifier_init(&vdev->err_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for error detection");
        vdev->pci_aer = false;
        return;
    }

    fd = event_notifier_get_fd(&vdev->err_notifier);
    qemu_set_fd_handler(fd, vfio_err_notifier_handler, NULL, vdev);

    if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
                                VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->err_notifier);
        vdev->pci_aer = false;
    }
}

static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
{
    Error *err = NULL;

    if (!vdev->pci_aer) {
        return;
    }

    if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0,
                                VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->err_notifier);
}

static void vfio_req_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;
    Error *err = NULL;

    if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
        return;
    }

    qdev_unplug(DEVICE(vdev), &err);
    if (err) {
        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
}

static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
{
    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
                                      .index = VFIO_PCI_REQ_IRQ_INDEX };
    Error *err = NULL;
    int32_t fd;

    if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
        return;
    }

    if (ioctl(vdev->vbasedev.fd,
              VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
        return;
    }

    if (event_notifier_init(&vdev->req_notifier, 0)) {
        error_report("vfio: Unable to init event notifier for device request");
        return;
    }

    fd = event_notifier_get_fd(&vdev->req_notifier);
    qemu_set_fd_handler(fd, vfio_req_notifier_handler, NULL, vdev);

    if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
                                VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
        qemu_set_fd_handler(fd, NULL, NULL, vdev);
        event_notifier_cleanup(&vdev->req_notifier);
    } else {
        vdev->req_enabled = true;
    }
}

static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
{
    Error *err = NULL;

    if (!vdev->req_enabled) {
        return;
    }

    if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0,
                                VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) {
        error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
    }
    qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
                        NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->req_notifier);

    vdev->req_enabled = false;
}

static void vfio_realize(PCIDevice *pdev, Error **errp)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    VFIODevice *vbasedev = &vdev->vbasedev;
    int i, ret;
    char uuid[UUID_STR_LEN];
    g_autofree char *name = NULL;

    if (vbasedev->fd < 0 && !vbasedev->sysfsdev) {
        if (!(~vdev->host.domain || ~vdev->host.bus ||
              ~vdev->host.slot || ~vdev->host.function)) {
            error_setg(errp, "No provided host device");
            error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
#ifdef CONFIG_IOMMUFD
                              "or -device vfio-pci,fd=DEVICE_FD "
#endif
                              "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
            return;
        }
        vbasedev->sysfsdev =
            g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
                            vdev->host.domain, vdev->host.bus,
                            vdev->host.slot, vdev->host.function);
    }

    if (!vfio_device_get_name(vbasedev, errp)) {
        return;
    }

    /*
     * Mediated devices *might* operate compatibly with discarding of RAM, but
     * we cannot know for certain, it depends on whether the mdev vendor driver
     * stays in sync with the active working set of the guest driver. Prevent
     * the x-balloon-allowed option unless this is minimally an mdev device.
     */
    vbasedev->mdev = vfio_device_is_mdev(vbasedev);

    trace_vfio_mdev(vbasedev->name, vbasedev->mdev);

    if (vbasedev->ram_block_discard_allowed && !vbasedev->mdev) {
        error_setg(errp, "x-balloon-allowed only potentially compatible "
                   "with mdev devices");
        goto error;
    }

    if (!qemu_uuid_is_null(&vdev->vf_token)) {
        qemu_uuid_unparse(&vdev->vf_token, uuid);
        name = g_strdup_printf("%s vf_token=%s", vbasedev->name, uuid);
    } else {
        name = g_strdup(vbasedev->name);
    }

    if (!vfio_attach_device(name, vbasedev,
                            pci_device_iommu_address_space(pdev), errp)) {
        goto error;
    }

    if (!vfio_populate_device(vdev, errp)) {
        goto error;
    }

    /* Get a copy of config space */
    ret = pread(vbasedev->fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_setg_errno(errp, -ret, "failed to read device config space");
        goto error;
    }

    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);

    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
    /* QEMU can also add or extend BARs */
    memset(vdev->emulated_config_bits + PCI_BASE_ADDRESS_0, 0xff, 6 * 4);

    /*
     * The PCI spec reserves vendor ID 0xffff as an invalid value. The
     * device ID is managed by the vendor and need only be a 16-bit value.
     * Allow any 16-bit value for subsystem so they can be hidden or changed.
     */
    if (vdev->vendor_id != PCI_ANY_ID) {
        if (vdev->vendor_id >= 0xffff) {
            error_setg(errp, "invalid PCI vendor ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
        trace_vfio_pci_emulated_vendor_id(vbasedev->name, vdev->vendor_id);
    } else {
        vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    }

    if (vdev->device_id != PCI_ANY_ID) {
        if (vdev->device_id > 0xffff) {
            error_setg(errp, "invalid PCI device ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
        trace_vfio_pci_emulated_device_id(vbasedev->name, vdev->device_id);
    } else {
        vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    }

    if (vdev->sub_vendor_id != PCI_ANY_ID) {
        if (vdev->sub_vendor_id > 0xffff) {
            error_setg(errp, "invalid PCI subsystem vendor ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
                               vdev->sub_vendor_id, ~0);
        trace_vfio_pci_emulated_sub_vendor_id(vbasedev->name,
                                              vdev->sub_vendor_id);
    }

    if (vdev->sub_device_id != PCI_ANY_ID) {
        if (vdev->sub_device_id > 0xffff) {
            error_setg(errp, "invalid PCI subsystem device ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
        trace_vfio_pci_emulated_sub_device_id(vbasedev->name,
                                              vdev->sub_device_id);
    }

    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
        PCI_HEADER_TYPE_MULTI_FUNCTION;

    /* Restore or clear multifunction, this is always controlled by QEMU */
    if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * Clear host resource mapping info. If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);

    vfio_pci_size_rom(vdev);

    vfio_bars_prepare(vdev);

    if (!vfio_msix_early_setup(vdev, errp)) {
        goto error;
    }

    vfio_bars_register(vdev);

    if (!vbasedev->mdev &&
        !pci_device_set_iommu_device(pdev, vbasedev->hiod, errp)) {
        error_prepend(errp, "Failed to set iommu_device: ");
        goto out_teardown;
    }

    if (!vfio_add_capabilities(vdev, errp)) {
        goto out_unset_idev;
    }

    if (vdev->vga) {
        vfio_vga_quirk_setup(vdev);
    }

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_setup(vdev, i);
    }

    if (!vdev->igd_opregion &&
        vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
        g_autofree struct vfio_region_info *opregion = NULL;

        if (vdev->pdev.qdev.hotplugged) {
            error_setg(errp,
                       "cannot support IGD OpRegion feature on hotplugged "
                       "device");
            goto out_unset_idev;
        }

        ret = vfio_get_dev_region_info(vbasedev,
                        VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
                        VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
        if (ret) {
            error_setg_errno(errp, -ret,
                             "does not support requested IGD OpRegion feature");
            goto out_unset_idev;
        }

        if (!vfio_pci_igd_opregion_init(vdev, opregion, errp)) {
            goto out_unset_idev;
        }
    }

    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }

    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }

    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                             vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev,
                                             vfio_intx_routing_notifier);
        vdev->irqchip_change_notifier.notify = vfio_irqchip_change;
        kvm_irqchip_add_change_notifier(&vdev->irqchip_change_notifier);
        if (!vfio_intx_enable(vdev, errp)) {
            goto out_deregister;
        }
    }

    if (vdev->display != ON_OFF_AUTO_OFF) {
        if (!vfio_display_probe(vdev, errp)) {
            goto out_deregister;
        }
    }
    if (vdev->enable_ramfb && vdev->dpy == NULL) {
        error_setg(errp, "ramfb=on requires display=on");
        goto out_deregister;
    }
    if (vdev->display_xres || vdev->display_yres) {
        if (vdev->dpy == NULL) {
            error_setg(errp, "xres and yres properties require display=on");
            goto out_deregister;
        }
        if (vdev->dpy->edid_regs == NULL) {
            error_setg(errp, "xres and yres properties need edid support");
            goto out_deregister;
        }
    }

    if (vdev->ramfb_migrate == ON_OFF_AUTO_ON && !vdev->enable_ramfb) {
        warn_report("x-ramfb-migrate=on but ramfb=off. "
                    "Forcing x-ramfb-migrate to off.");
        vdev->ramfb_migrate = ON_OFF_AUTO_OFF;
    }
    if (vbasedev->enable_migration == ON_OFF_AUTO_OFF) {
        if (vdev->ramfb_migrate == ON_OFF_AUTO_AUTO) {
            vdev->ramfb_migrate = ON_OFF_AUTO_OFF;
        } else if (vdev->ramfb_migrate == ON_OFF_AUTO_ON) {
            error_setg(errp, "x-ramfb-migrate requires enable-migration");
            goto out_deregister;
        }
    }

    if (!pdev->failover_pair_id) {
        if (!vfio_migration_realize(vbasedev, errp)) {
            goto out_deregister;
        }
    }

    vfio_register_err_notifier(vdev);
    vfio_register_req_notifier(vdev);
    vfio_setup_resetfn_quirk(vdev);

    return;

out_deregister:
    if (vdev->interrupt == VFIO_INT_INTx) {
        vfio_intx_disable(vdev);
    }
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    if (vdev->irqchip_change_notifier.notify) {
        kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
    }
    if (vdev->intx.mmap_timer) {
        timer_free(vdev->intx.mmap_timer);
    }
out_unset_idev:
    if (!vbasedev->mdev) {
        pci_device_unset_iommu_device(pdev);
    }
out_teardown:
    vfio_teardown_msi(vdev);
    vfio_bars_exit(vdev);
error:
    error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->name);
}

static void vfio_instance_finalize(Object *obj)
{
    VFIOPCIDevice *vdev = VFIO_PCI(obj);

    vfio_display_finalize(vdev);
    vfio_bars_finalize(vdev);
    g_free(vdev->emulated_config_bits);
    g_free(vdev->rom);
    /*
     * XXX Leaking igd_opregion is not an oversight, we can't remove the
     * fw_cfg entry therefore leaking this allocation seems like the safest
     * option.
     *
     * g_free(vdev->igd_opregion);
     */
    vfio_pci_put_device(vdev);
}

static void vfio_exitfn(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
    VFIODevice *vbasedev = &vdev->vbasedev;

    vfio_unregister_req_notifier(vdev);
    vfio_unregister_err_notifier(vdev);
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    if (vdev->irqchip_change_notifier.notify) {
        kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier);
    }
    vfio_disable_interrupts(vdev);
    if (vdev->intx.mmap_timer) {
        timer_free(vdev->intx.mmap_timer);
    }
    vfio_teardown_msi(vdev);
    vfio_pci_disable_rp_atomics(vdev);
    vfio_bars_exit(vdev);
    vfio_migration_exit(vbasedev);
    if (!vbasedev->mdev) {
        pci_device_unset_iommu_device(pdev);
    }
}

static void vfio_pci_reset(DeviceState *dev)
{
    VFIOPCIDevice *vdev = VFIO_PCI(dev);

    trace_vfio_pci_reset(vdev->vbasedev.name);

    vfio_pci_pre_reset(vdev);

    if (vdev->display != ON_OFF_AUTO_OFF) {
        vfio_display_reset(vdev);
    }

    if (vdev->resetfn && !vdev->resetfn(vdev)) {
        goto post_reset;
    }

    if (vdev->vbasedev.reset_works &&
        (vdev->has_flr || !vdev->has_pm_reset) &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_flr(vdev->vbasedev.name);
        goto post_reset;
    }

    /* See if we can do our own bus reset */
    if (!vfio_pci_hot_reset_one(vdev)) {
        goto post_reset;
    }

    /* If nothing else works and the device supports PM reset, use it */
    if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
        trace_vfio_pci_reset_pm(vdev->vbasedev.name);
        goto post_reset;
    }

post_reset:
    vfio_pci_post_reset(vdev);
}

static void vfio_instance_init(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VFIOPCIDevice *vdev = VFIO_PCI(obj);
    VFIODevice *vbasedev = &vdev->vbasedev;

    device_add_bootindex_property(obj, &vdev->bootindex,
                                  "bootindex", NULL,
                                  &pci_dev->qdev);
    vdev->host.domain = ~0U;
    vdev->host.bus = ~0U;
    vdev->host.slot = ~0U;
    vdev->host.function = ~0U;

    vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_PCI, &vfio_pci_ops,
                     DEVICE(vdev), false);

    vdev->nv_gpudirect_clique = 0xFF;

    /*
     * QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
     * line, therefore, no need to wait to realize like other devices.
     */
    pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
}

static Property vfio_pci_dev_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
    DEFINE_PROP_UUID_NODEFAULT("vf-token", VFIOPCIDevice, vf_token),
    DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
    DEFINE_PROP_ON_OFF_AUTO("x-pre-copy-dirty-page-tracking", VFIOPCIDevice,
                            vbasedev.pre_copy_dirty_page_tracking,
                            ON_OFF_AUTO_ON),
    DEFINE_PROP_ON_OFF_AUTO("x-device-dirty-page-tracking", VFIOPCIDevice,
                            vbasedev.device_dirty_page_tracking,
                            ON_OFF_AUTO_ON),
    DEFINE_PROP_ON_OFF_AUTO("display", VFIOPCIDevice,
                            display, ON_OFF_AUTO_OFF),
    DEFINE_PROP_UINT32("xres", VFIOPCIDevice, display_xres, 0),
    DEFINE_PROP_UINT32("yres", VFIOPCIDevice, display_yres, 0),
    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
                       intx.mmap_timeout, 1100),
    DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
    DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_REQ_BIT, true),
    DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
                    VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
    DEFINE_PROP_ON_OFF_AUTO("enable-migration", VFIOPCIDevice,
                            vbasedev.enable_migration, ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("migration-events", VFIOPCIDevice,
                     vbasedev.migration_events, false),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice,
                     vbasedev.ram_block_discard_allowed, false),
    DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
    DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
    DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
    DEFINE_PROP_BOOL("x-no-geforce-quirks", VFIOPCIDevice,
                     no_geforce_quirks, false),
    DEFINE_PROP_BOOL("x-no-kvm-ioeventfd", VFIOPCIDevice, no_kvm_ioeventfd,
                     false),
    DEFINE_PROP_BOOL("x-no-vfio-ioeventfd", VFIOPCIDevice, no_vfio_ioeventfd,
                     false),
    DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
                       sub_vendor_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
                       sub_device_id, PCI_ANY_ID),
    DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
    DEFINE_PROP_UNSIGNED_NODEFAULT("x-nv-gpudirect-clique", VFIOPCIDevice,
                                   nv_gpudirect_clique,
                                   qdev_prop_nv_gpudirect_clique, uint8_t),
    DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo,
                                OFF_AUTO_PCIBAR_OFF),
#ifdef CONFIG_IOMMUFD
    DEFINE_PROP_LINK("iommufd", VFIOPCIDevice, vbasedev.iommufd,
                     TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
#endif
    DEFINE_PROP_BOOL("skip-vsc-check", VFIOPCIDevice, skip_vsc_check, true),
    DEFINE_PROP_END_OF_LIST(),
};
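
/*
 * Example usage of the properties above (hypothetical host address):
 * a basic assignment is
 *
 *   -device vfio-pci,host=0000:01:00.0
 *
 * and, on CONFIG_IOMMUFD builds, the iommufd backend can be selected with
 *
 *   -object iommufd,id=iommufd0
 *   -device vfio-pci,host=0000:01:00.0,iommufd=iommufd0
 */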

#ifdef CONFIG_IOMMUFD
static void vfio_pci_set_fd(Object *obj, const char *str, Error **errp)
{
    vfio_device_set_fd(&VFIO_PCI(obj)->vbasedev, str, errp);
}
#endif

static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);

    device_class_set_legacy_reset(dc, vfio_pci_reset);
    device_class_set_props(dc, vfio_pci_dev_properties);
#ifdef CONFIG_IOMMUFD
    object_class_property_add_str(klass, "fd", NULL, vfio_pci_set_fd);
#endif
    dc->desc = "VFIO-based PCI device assignment";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    pdc->realize = vfio_realize;
    pdc->exit = vfio_exitfn;
    pdc->config_read = vfio_pci_read_config;
    pdc->config_write = vfio_pci_write_config;
}

static const TypeInfo vfio_pci_dev_info = {
    .name = TYPE_VFIO_PCI,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_dev_class_init,
    .instance_init = vfio_instance_init,
    .instance_finalize = vfio_instance_finalize,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    },
};

static Property vfio_pci_dev_nohotplug_properties[] = {
    DEFINE_PROP_BOOL("ramfb", VFIOPCIDevice, enable_ramfb, false),
    DEFINE_PROP_ON_OFF_AUTO("x-ramfb-migrate", VFIOPCIDevice, ramfb_migrate,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_END_OF_LIST(),
};

static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, vfio_pci_dev_nohotplug_properties);
    dc->hotpluggable = false;
}

static const TypeInfo vfio_pci_nohotplug_dev_info = {
    .name = TYPE_VFIO_PCI_NOHOTPLUG,
    .parent = TYPE_VFIO_PCI,
    .instance_size = sizeof(VFIOPCIDevice),
    .class_init = vfio_pci_nohotplug_dev_class_init,
};

static void register_vfio_pci_dev_type(void)
{
    type_register_static(&vfio_pci_dev_info);
    type_register_static(&vfio_pci_nohotplug_dev_info);
}

type_init(register_vfio_pci_dev_type)