/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"

#include "exec/memop.h"
#include "standard-headers/linux/virtio_pci.h"
#include "hw/boards.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"
#include "sysemu/replay.h"
#define VIRTIO_PCI_REGION_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}
/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}
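/*
 * Deliver a transport interrupt: an MSI-X message when the guest has
 * MSI-X enabled, otherwise the legacy INTx pin driven by bit 0 of the
 * ISR status field.
 */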
static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev)) {
        msix_notify(&proxy->pci_dev, vector);
    } else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, qatomic_read(&vdev->isr) & 1);
    }
}
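/* Migration: the PCI and MSI-X state is saved alongside the virtio state. */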
static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, vdev->config_vector);
    }
}
static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
    .name = "virtio_pci/modern_queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOPCIQueue),
        VMSTATE_UNUSED(1), /* enabled was stored as be16 */
        VMSTATE_BOOL(enabled, VirtIOPCIQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};
static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return virtio_pci_modern(proxy);
}
static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
        VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_pci_modern_queue_state,
                             VirtIOPCIQueue),
        VMSTATE_END_OF_LIST()
    }
};
static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_pci_modern_state_sub,
        NULL
    }
};
static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}
static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}
static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}
static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
    }
}
static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int ret;

    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vdev->config_vector);
    } else {
        vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
    }
    return 0;
}
static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;

    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}
static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}
#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000

static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
{
    return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
}
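/*
 * Attach or detach a queue's host notifier (ioeventfd) on every
 * notification region in use: the modern MMIO BAR (zero-length match when
 * KVM supports any-length ioeventfds), the optional modern PIO region and
 * the legacy I/O BAR.
 */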
static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    bool legacy = virtio_pci_legacy(proxy);
    bool modern = virtio_pci_modern(proxy);
    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;

    if (assign) {
        if (modern) {
            if (fast_mmio) {
                memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_add_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            if (fast_mmio) {
                memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_del_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    }
    return 0;
}
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}
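/*
 * Writes to the legacy (virtio 0.9) I/O port block: the offset selects
 * feature negotiation, queue setup, status or MSI-X vector registers.
 */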
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly?  We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_reset(DEVICE(proxy));
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: unexpected address 0x%x value 0x%x\n",
                      __func__, addr, val);
        break;
    }
}
static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = qatomic_xchg(&vdev->isr, 0);
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}
static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}
static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}
static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};
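/*
 * Map an offset within the modern BAR to the capability region containing
 * it, rewriting *off to be relative to that region.
 */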
static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
                                                 hwaddr *off, int len)
{
    int i;
    VirtIOPCIRegion *reg;

    for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
        reg = &proxy->regs[i];
        if (*off >= reg->offset &&
            *off + len <= reg->offset + reg->size) {
            *off -= reg->offset;
            return &reg->mr;
        }
    }

    return NULL;
}
/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
                                const uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        break;
    case 2:
        val = pci_get_word(buf);
        break;
    case 4:
        val = pci_get_long(buf);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        return;
    }
    memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE,
                                 MEMTXATTRS_UNSPECIFIED);
}
static void
virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
                          uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE,
                                MEMTXATTRS_UNSPECIFIED);
    switch (len) {
    case 1:
        pci_set_byte(buf, val);
        break;
    case 2:
        pci_set_word(buf, val);
        break;
    case 4:
        pci_set_long(buf, val);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
        pcie_cap_flr_write_config(pci_dev, address, val, len);
    }

    if (range_covers_byte(address, len, PCI_COMMAND)) {
        if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            virtio_set_disabled(vdev, true);
            virtio_pci_stop_ioeventfd(proxy);
            virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
        } else {
            virtio_set_disabled(vdev, false);
        }
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len);
        }
    }
}
static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}
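/*
 * The kvm_virtio_pci_* helpers below manage MSI routes and irqfd bindings
 * in the KVM irqchip, so that a queue's guest notifier can be delivered
 * as an MSI-X interrupt without a trip through QEMU.
 */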
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
        ret = kvm_irqchip_add_msi_route(&c, vector, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        kvm_irqchip_commit_route_changes(&c);
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}
*proxy
,
701 VirtIOIRQFD
*irqfd
= &proxy
->vector_irqfd
[vector
];
702 if (--irqfd
->users
== 0) {
703 kvm_irqchip_release_virq(kvm_state
, irqfd
->virq
);
static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    unsigned int queue_no,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}
static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         unsigned int queue_no,
                                         unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}
static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    unsigned int vector;
    int ret, queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}
static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    int queue_no;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}
static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}
static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}
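/*
 * MSI-X per-vector notifier callbacks (registered via
 * msix_set_vector_notifiers) fan each mask/unmask/poll event out to all
 * queues routed to that vector.
 */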
static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }

    return 0;

undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}
static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int index;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
        }
        vq = virtio_vector_next_queue(vq);
    }
}
static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}
static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}
static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}
static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || k->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}
static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
                                           MemoryRegion *mr, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int offset;

    if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
        virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
        return -1;
    }

    if (assign) {
        offset = virtio_pci_queue_mem_mult(proxy) * n;
        memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
    } else {
        memory_region_del_subregion(&proxy->notify.mr, mr);
    }

    return 0;
}
*d
, bool running
)
1086 VirtIOPCIProxy
*proxy
= to_virtio_pci_proxy(d
);
1087 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1090 /* Old QEMU versions did not set bus master enable on status write.
1091 * Detect DRIVER set and enable it.
1093 if ((proxy
->flags
& VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION
) &&
1094 (vdev
->status
& VIRTIO_CONFIG_S_DRIVER
) &&
1095 !(proxy
->pci_dev
.config
[PCI_COMMAND
] & PCI_COMMAND_MASTER
)) {
1096 pci_default_write_config(&proxy
->pci_dev
, PCI_COMMAND
,
1097 proxy
->pci_dev
.config
[PCI_COMMAND
] |
1098 PCI_COMMAND_MASTER
, 1);
1100 virtio_pci_start_ioeventfd(proxy
);
1102 virtio_pci_stop_ioeventfd(proxy
);
/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */
static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}
static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;

    return pci_get_address_space(dev);
}
static bool virtio_pci_iommu_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;
    AddressSpace *dma_as = pci_device_iommu_address_space(dev);

    if (dma_as == &address_space_memory) {
        return false;
    }

    return true;
}
static bool virtio_pci_queue_enabled(DeviceState *d, int n)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return proxy->vqs[n].enabled;
    }

    return virtio_queue_enabled_legacy(vdev, n);
}
static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                  struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0,
                                cap->cap_len, &error_abort);

    assert(cap->cap_len >= sizeof *cap);
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}
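/*
 * Accessors for the modern (virtio 1.0) common configuration structure,
 * dispatching on the VIRTIO_PCI_COMMON_* structure offsets.
 */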
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            val = (vdev->host_features & ~vdc->legacy_features) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}
static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        if (val == 1) {
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
        } else {
            virtio_error(vdev, "wrong value for queue_enable %"PRIx64, val);
        }
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}
static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    if (virtio_bus_get_device(&proxy->bus) == NULL) {
        return UINT64_MAX;
    }

    return 0;
}
static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);

    if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}
static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    unsigned queue = val;

    if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}
static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    val = qatomic_xchg(&vdev->isr, 0);
    pci_irq_deassert(&proxy->pci_dev);
    return val;
}

static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}
static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val = 0;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    }
    return val;
}

static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}
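/*
 * Set up the modern MemoryRegions (common, isr, device, notify and
 * notify-pio) that are later mapped into BARs and advertised through
 * vendor-specific PCI capabilities.
 */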
static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy,
                                           const char *vdev_name)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    g_autoptr(GString) name = g_string_new(NULL);

    g_string_printf(name, "virtio-pci-common-%s", vdev_name);
    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          name->str,
                          proxy->common.size);

    g_string_printf(name, "virtio-pci-isr-%s", vdev_name);
    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          name->str,
                          proxy->isr.size);

    g_string_printf(name, "virtio-pci-device-%s", vdev_name);
    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          proxy,
                          name->str,
                          proxy->device.size);

    g_string_printf(name, "virtio-pci-notify-%s", vdev_name);
    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          proxy,
                          name->str,
                          proxy->notify.size);

    g_string_printf(name, "virtio-pci-notify-pio-%s", vdev_name);
    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          proxy,
                          name->str,
                          proxy->notify_pio.size);
}
static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);
}
static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar_idx);
}

static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar_idx);
}

static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}
static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_pci_modern(proxy)) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}
/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = virtio_pci_legacy(proxy);
    bool modern;
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    /*
     * Virtio capabilities present without
     * VIRTIO_F_VERSION_1 confuses guests
     */
    if (!proxy->ignore_backend_features &&
            !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        virtio_pci_disable_modern(proxy);

        if (!legacy) {
            error_setg(errp, "Device doesn't support modern mode, and legacy"
                             " mode is disabled");
            error_append_hint(errp, "Set disable-legacy to off\n");

            return;
        }
    }

    modern = virtio_pci_modern(proxy);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        if (!virtio_legacy_allowed(vdev)) {
            /*
             * To avoid migration issues, we allow legacy mode when legacy
             * check is disabled in the old machine types (< 5.1).
             */
            if (virtio_legacy_check_disabled(vdev)) {
                warn_report("device is modern-only, but for backward "
                            "compatibility legacy is allowed");
            } else {
                error_setg(errp,
                           "device is modern-only, use disable-legacy=on");
                return;
            }
        }
        if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
            error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by"
                       " neither legacy nor transitional device");
            return;
        }
        /*
         * Legacy and transitional devices use specific subsystem IDs.
         * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
         * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
         */
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;

    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_pci_modern_regions_init(proxy, vdev->name);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar_idx, NULL);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it */
            if (err != -ENOTSUP) {
                warn_report("unable to init msix vectors to %" PRIu32,
                            proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }
}
static void virtio_pci_device_unplugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    bool modern = virtio_pci_modern(proxy);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;

    virtio_pci_stop_ioeventfd(proxy);

    if (modern) {
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
        if (modern_pio) {
            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
        }
    }
}
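/*
 * Realize the proxy: choose the default BAR layout, size the modern BAR,
 * add the PCIe capabilities that apply, and create the virtio-pci-bus the
 * backend device will plug into.
 */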
static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /* fd-based ioevents can't be synchronized in record/replay */
    if (replay_mode != REPLAY_MODE_NONE) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     *   region 0   --  virtio legacy io bar
     *   region 1   --  msi-x bar
     *   region 2   --  virtio modern io bar (off by default)
     *   region 4+5 --  virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx  = 0;
    proxy->msix_bar_idx       = 1;
    proxy->modern_io_bar_idx  = 2;
    proxy->modern_mem_bar_idx = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       /* PCI BAR regions must be powers of 2 */
                       pow2ceil(proxy->notify.offset + proxy->notify.size));

    if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
        proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
        error_setg(errp, "device cannot work as neither modern nor legacy mode"
                   " is enabled");
        error_append_hint(errp, "Set either disable-modern or disable-legacy"
                          " to off\n");
        return;
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;
        uint16_t last_pcie_cap_offset = PCI_CONFIG_SPACE_SIZE;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
                                 PCI_PM_SIZEOF, errp);
        if (pos < 0) {
            return;
        }

        pci_dev->exp.pm_cap = pos;

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_AER) {
            pcie_aer_init(pci_dev, PCI_ERR_VER, last_pcie_cap_offset,
                          PCI_ERR_SIZEOF, NULL);
            last_pcie_cap_offset += PCI_ERR_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
            /* Init error enabling flags */
            pcie_cap_deverr_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
            /* Init Link Control Register */
            pcie_cap_lnkctl_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            /* Init Power Management Control Register */
            pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_STATE_MASK);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, last_pcie_cap_offset,
                          proxy->flags & VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED);
            last_pcie_cap_offset += PCI_EXT_CAP_ATS_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
            /* Set Function Level Reset capability bit */
            pcie_cap_flr_init(pci_dev);
        }
    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}
static void virtio_pci_exit(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    msix_uninit_exclusive_bar(pci_dev);
    if (proxy->flags & VIRTIO_PCI_FLAG_AER && pcie_port &&
        pci_is_express(pci_dev)) {
        pcie_aer_exit(pci_dev);
    }
}
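/*
 * Reset resets the backend via the virtio bus, then clears the proxy-side
 * shadow state of the modern queue registers and the PCIe capabilities.
 */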
static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    PCIDevice *dev = PCI_DEVICE(qdev);
    int i;

    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
        proxy->vqs[i].num = 0;
        proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
        proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
        proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
    }

    if (pci_is_express(dev)) {
        pcie_cap_deverr_reset(dev);
        pcie_cap_lnkctl_reset(dev);

        pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
    }
}
static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    DEFINE_PROP_BIT("x-ats-page-aligned", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT, true),
    DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
    DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
    DEFINE_PROP_BIT("x-pcie-flr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_FLR_BIT, true),
    DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_AER_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        virtio_pci_modern(proxy)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}
static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    device_class_set_props(dc, virtio_pci_properties);
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    device_class_set_parent_realize(dc, virtio_pci_dc_realize,
                                    &vpciklass->parent_dc_realize);
    dc->reset = virtio_pci_reset;
}
static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};
static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
{
    const VirtioPCIDeviceTypeInfo *t = data;
    if (t->class_init) {
        t->class_init(klass, NULL);
    }
}

static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_pci_generic_properties);
}
static void virtio_pci_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_OFF;
    proxy->disable_modern = false;
}

static void virtio_pci_non_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_ON;
    proxy->disable_modern = false;
}
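/*
 * Register up to four QOM types for a device: an abstract base type that
 * carries the common properties, plus optional generic, transitional and
 * non-transitional concrete types.
 */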
void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    char *base_name = NULL;
    TypeInfo base_type_info = {
        .name          = t->base_name,
        .parent        = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .class_size    = t->class_size,
        .abstract      = true,
        .interfaces    = t->interfaces,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        /* use intermediate %s-base-type to add generic device props */
        base_name = g_strdup_printf("%s-base-type", t->generic_name);
        base_type_info.name = base_name;
        base_type_info.class_init = virtio_pci_generic_class_init;

        generic_type_info.parent = base_name;
        generic_type_info.class_init = virtio_pci_base_class_init;
        generic_type_info.class_data = (void *)t;

        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    } else {
        base_type_info.class_init = virtio_pci_base_class_init;
        base_type_info.class_data = (void *)t;
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name          = t->non_transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }

    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name          = t->transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }
    g_free(base_name);
}
unsigned virtio_pci_optimal_num_queues(unsigned fixed_queues)
{
    /*
     * 1:1 vq to vCPU mapping is ideal because the same vCPU that submitted
     * virtqueue buffers can handle their completion. When a different vCPU
     * handles completion it may need to IPI the vCPU that submitted the
     * request and this adds overhead.
     *
     * Virtqueues consume guest RAM and MSI-X vectors. This is wasteful in
     * guests with very many vCPUs and a device that is only used by a few
     * vCPUs. Unfortunately optimizing that case requires manual pinning inside
     * the guest, so those users might as well manually set the number of
     * queues. There is no upper limit that can be applied automatically and
     * doing so arbitrarily would result in a sudden performance drop once the
     * threshold number of vCPUs is exceeded.
     */
    unsigned num_queues = current_machine->smp.cpus;

    /*
     * The maximum number of MSI-X vectors is PCI_MSIX_FLAGS_QSIZE + 1, but the
     * config change interrupt and the fixed virtqueues must be taken into
     * account too.
     */
    num_queues = MIN(num_queues, PCI_MSIX_FLAGS_QSIZE - fixed_queues);

    /*
     * There is a limit to how many virtqueues a device can have.
     */
    return MIN(num_queues, VIRTIO_QUEUE_MAX - fixed_queues);
}
/* virtio-pci-bus */

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_init(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, virtio_bus_name);
}
static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
    k->iommu_enabled = virtio_pci_iommu_enabled;
    k->queue_enabled = virtio_pci_queue_enabled;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_size    = sizeof(VirtioPCIBusClass),
    .class_init    = virtio_pci_bus_class_init,
};
static void virtio_pci_register_types(void)
{
    /* Base types: */
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
}

type_init(virtio_pci_register_types)