/*
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "virtio-blk.h"
#include "virtio-net.h"
#include "virtio-serial.h"
#include "virtio-scsi.h"
#include "qemu-error.h"
#include "virtio-pci.h"

/* from Linux's linux/virtio_pci.h */

/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES        0

/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES       4

/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN            8

/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM            12

/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL            14

/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY         16

/* An 8-bit device status register. */
#define VIRTIO_PCI_STATUS               18

/* An 8-bit r/o interrupt status register.  Reading the value will return the
 * current contents of the ISR and will also clear it.  This is effectively
 * a read-and-acknowledge. */
#define VIRTIO_PCI_ISR                  19

/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR        20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR         22

/* Config space size */
#define VIRTIO_PCI_CONFIG_NOMSI         20
#define VIRTIO_PCI_CONFIG_MSI           24
#define VIRTIO_PCI_REGION_SIZE(dev)     (msix_present(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG(dev)          (msix_enabled(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)
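
/* Quick reference: the legacy I/O layout implied by the constants above,
 * as byte offsets from the start of the virtio BAR:
 *
 *    0  HOST_FEATURES    32-bit  r/o
 *    4  GUEST_FEATURES   32-bit  r/w
 *    8  QUEUE_PFN        32-bit  r/w
 *   12  QUEUE_NUM        16-bit  r/o
 *   14  QUEUE_SEL        16-bit  r/w
 *   16  QUEUE_NOTIFY     16-bit  r/w
 *   18  STATUS            8-bit  r/w
 *   19  ISR               8-bit  r/o, read-to-acknowledge
 *   20  MSI-X config vector, or start of device config without MSI-X
 *   22  MSI-X queue vector
 *   24  start of device config when MSI-X is enabled
 */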

/* How many bits to shift physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT    12
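
/* For example, a guest that writes 0x12345 to QUEUE_PFN places the selected
 * queue at guest-physical address 0x12345 << 12 = 0x12345000; reads of
 * QUEUE_PFN return the address shifted back down by the same amount. */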

/* Flags track per-device state like workarounds for quirks in older guests. */
#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG  (1 << 0)

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 */
#define wmb() do { } while (0)

/* HACK for virtio to determine if it's running a big endian guest */
bool virtio_is_big_endian(void);
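
/* The config-space accessors below consult it to byte-swap 16- and 32-bit
 * device-config reads and writes when the guest is big-endian. */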

static void virtio_pci_notify(void *opaque, uint16_t vector)
{
    VirtIOPCIProxy *proxy = opaque;
    if (msix_enabled(&proxy->pci_dev))
        msix_notify(&proxy->pci_dev, vector);
    else
        qemu_set_irq(proxy->pci_dev.irq[0], proxy->vdev->isr & 1);
}

static void virtio_pci_save_config(void *opaque, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = opaque;
    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, proxy->vdev->config_vector);
}

static void virtio_pci_save_queue(void *opaque, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = opaque;
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, virtio_queue_vector(proxy->vdev, n));
}

static int virtio_pci_load_config(void *opaque, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = opaque;
    int ret;

    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &proxy->vdev->config_vector);
    } else {
        proxy->vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (proxy->vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, proxy->vdev->config_vector);
    }
    return 0;
}

static int virtio_pci_load_queue(void *opaque, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = opaque;
    uint16_t vector;

    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(proxy->vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }
    return 0;
}
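
/* The helper below ties a virtqueue's host notifier (an eventfd) to guest
 * writes at the QUEUE_NOTIFY offset of the virtio BAR, so that a queue kick
 * can be serviced through the eventfd (KVM ioeventfd) instead of trapping
 * the I/O write into QEMU's device emulation on every kick. */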

static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
                                                 int n, bool assign,
                                                 bool set_handler)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int r = 0;

    if (assign) {
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %d",
                         __func__, r);
            return r;
        }
        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
        memory_region_add_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
                                  true, n, notifier);
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
    }
    return r;
}

static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    int n, r;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
        proxy->ioeventfd_disabled ||
        proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
        if (r < 0) {
            goto assign_error;
        }
    }
    proxy->ioeventfd_started = true;
    return;

assign_error:
    /* Roll back the notifiers that were successfully assigned. */
    while (--n >= 0) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
    error_report("%s: failed. Fallback to a userspace (slower).", __func__);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    int n, r;

    if (!proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
}

void virtio_pci_reset(DeviceState *d)
{
    VirtIOPCIProxy *proxy = container_of(d, VirtIOPCIProxy, pci_dev.qdev);
    virtio_pci_stop_ioeventfd(proxy);
    virtio_reset(proxy->vdev);
    msix_unuse_all_vectors(&proxy->pci_dev);
    proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
}

static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = proxy->vdev;
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly?  We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = vdev->bad_features ? vdev->bad_features(vdev) : 0;
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_stop_ioeventfd(proxy);
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_PCI_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_PCI_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }

        /* Linux before 2.6.34 sets the device as OK without enabling
           the PCI device bus master bit.  In this case we need to disable
           some safety checks. */
        if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for the guest to discover that an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for the guest to discover that an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}

static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = proxy->vdev;
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = proxy->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = vdev->isr;
        vdev->isr = 0;
        qemu_set_irq(proxy->pci_dev.irq[0], 0);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    uint64_t val = 0;

    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(proxy->vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(proxy->vdev, addr);
        if (virtio_is_big_endian()) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(proxy->vdev, addr);
        if (virtio_is_big_endian()) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);

    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;

    /*
     * Virtio-PCI is odd.  Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(proxy->vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian()) {
            val = bswap16(val);
        }
        virtio_config_writew(proxy->vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian()) {
            val = bswap32(val);
        }
        virtio_config_writel(proxy->vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER) &&
        !(proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG)) {
        virtio_pci_stop_ioeventfd(proxy);
        virtio_set_status(proxy->vdev,
                          proxy->vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
    }
}

static unsigned virtio_pci_get_features(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;
    return proxy->host_features;
}

static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
                                        MSIMessage msg)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, msg);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;

    ret = kvm_irqchip_add_irqfd_notifier(kvm_state, n, irqfd->virq);
    if (ret < 0) {
        if (--irqfd->users == 0) {
            kvm_irqchip_release_virq(kvm_state, irqfd->virq);
        }
        return ret;
    }

    virtio_queue_set_guest_notifier_fd_handler(vq, true, true);
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int queue_no,
                                             unsigned int vector)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier(kvm_state, n, irqfd->virq);
    assert(ret == 0);

    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }

    virtio_queue_set_guest_notifier_fd_handler(vq, true, false);
}

static int kvm_virtio_pci_vector_use(PCIDevice *dev, unsigned vector,
                                     MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    int ret, queue_no;

    for (queue_no = 0; queue_no < VIRTIO_PCI_QUEUE_MAX; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
        if (ret < 0) {
            goto undo;
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        kvm_virtio_pci_vq_vector_release(proxy, queue_no, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vector_release(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    int queue_no;

    for (queue_no = 0; queue_no < VIRTIO_PCI_QUEUE_MAX; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        kvm_virtio_pci_vq_vector_release(proxy, queue_no, vector);
    }
}

static int virtio_pci_set_guest_notifier(void *opaque, int n, bool assign)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, false);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;
    return msix_enabled(&proxy->pci_dev);
}

static int virtio_pci_set_guest_notifiers(void *opaque, bool assign)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = proxy->vdev;
    int r, n;

    /* Must unset vector notifier while guest notifier is still assigned */
    if (kvm_msi_via_irqfd_enabled() && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        g_free(proxy->vector_irqfd);
        proxy->vector_irqfd = NULL;
    }

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(opaque, n, assign);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if (kvm_msi_via_irqfd_enabled() && assign) {
        proxy->vector_irqfd =
            g_malloc0(sizeof(*proxy->vector_irqfd) *
                      msix_nr_vectors_allocated(&proxy->pci_dev));
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      kvm_virtio_pci_vector_use,
                                      kvm_virtio_pci_vector_release);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(opaque, n, !assign);
    }
    return r;
}

static int virtio_pci_set_host_notifier(void *opaque, int n, bool assign)
{
    VirtIOPCIProxy *proxy = opaque;

    /* Stop using ioeventfd for virtqueue kick if the device starts using host
     * notifiers.  This makes it easy to avoid stepping on each others' toes.
     */
    proxy->ioeventfd_disabled = assign;
    if (assign) {
        virtio_pci_stop_ioeventfd(proxy);
    }
    /* We don't need to start here: it's not needed because backend
     * currently only stops on status change away from ok,
     * reset, vmstop and such. If we do add code to start here,
     * need to check vmstate, device state etc. */
    return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
}

static void virtio_pci_vmstate_change(void *opaque, bool running)
{
    VirtIOPCIProxy *proxy = opaque;

    if (running) {
        /* Try to find out if the guest has bus master disabled, but is
           in ready state.  Then we have a buggy guest OS. */
        if ((proxy->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}
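
/* These bindings are the callbacks through which the transport-independent
 * virtio core reaches back into this PCI transport: interrupt delivery,
 * save/load of PCI and MSI-X state, feature negotiation and notifier
 * setup all go through the hooks collected below. */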
static const VirtIOBindings virtio_pci_bindings = {
    .notify = virtio_pci_notify,
    .save_config = virtio_pci_save_config,
    .load_config = virtio_pci_load_config,
    .save_queue = virtio_pci_save_queue,
    .load_queue = virtio_pci_load_queue,
    .get_features = virtio_pci_get_features,
    .query_guest_notifiers = virtio_pci_query_guest_notifiers,
    .set_host_notifier = virtio_pci_set_host_notifier,
    .set_guest_notifiers = virtio_pci_set_guest_notifiers,
    .vmstate_change = virtio_pci_vmstate_change,
};

void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev)
{
    uint8_t *config;
    uint32_t size;

    proxy->vdev = vdev;

    config = proxy->pci_dev.config;

    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }
    pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
                 pci_get_word(config + PCI_VENDOR_ID));
    pci_set_word(config + PCI_SUBSYSTEM_ID, vdev->device_id);
    config[PCI_INTERRUPT_PIN] = 1;

    if (vdev->nvectors &&
        msix_init_exclusive_bar(&proxy->pci_dev, vdev->nvectors, 1)) {
        vdev->nvectors = 0;
    }

    proxy->pci_dev.config_write = virtio_write_config;

    size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) + vdev->config_len;
    if (size & (size - 1)) {
        size = 1 << qemu_fls(size);
    }
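
    /* For example: with MSI-X absent the common registers occupy 20 bytes,
     * so a device with an 8-byte config area needs 28 bytes, which the
     * rounding above turns into a 32-byte power-of-two I/O BAR. */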

    memory_region_init_io(&proxy->bar, &virtio_pci_config_ops, proxy,
                          "virtio-pci", size);
    pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
                     &proxy->bar);

    if (!kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    virtio_bind_device(vdev, &virtio_pci_bindings, proxy);
    proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
    proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
    proxy->host_features = vdev->get_features(vdev, proxy->host_features);
}

static int virtio_blk_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->class_code != PCI_CLASS_STORAGE_SCSI &&
        proxy->class_code != PCI_CLASS_STORAGE_OTHER)
        proxy->class_code = PCI_CLASS_STORAGE_SCSI;

    vdev = virtio_blk_init(&pci_dev->qdev, &proxy->blk);

    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev);

    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static void virtio_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    memory_region_destroy(&proxy->bar);
    msix_uninit_exclusive_bar(pci_dev);
}

static void virtio_blk_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_blk_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}

static int virtio_serial_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
        proxy->class_code != PCI_CLASS_DISPLAY_OTHER &&  /* qemu 0.10 */
        proxy->class_code != PCI_CLASS_OTHERS)           /* qemu-kvm  */
        proxy->class_code = PCI_CLASS_COMMUNICATION_OTHER;

    vdev = virtio_serial_init(&pci_dev->qdev, &proxy->serial);

    vdev->nvectors = proxy->nvectors == DEV_NVECTORS_UNSPECIFIED
                     ? proxy->serial.max_virtserial_ports + 1
                     : proxy->nvectors;
    virtio_init_pci(proxy, vdev);
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static void virtio_serial_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_serial_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}

static int virtio_net_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    vdev = virtio_net_init(&pci_dev->qdev, &proxy->nic, &proxy->net);

    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev);

    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static void virtio_net_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_net_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}

static int virtio_balloon_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->class_code != PCI_CLASS_OTHERS &&
        proxy->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
        proxy->class_code = PCI_CLASS_OTHERS;
    }

    vdev = virtio_balloon_init(&pci_dev->qdev);

    virtio_init_pci(proxy, vdev);
    return 0;
}

static void virtio_balloon_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_balloon_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}

static int virtio_rng_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->rng.rng == NULL) {
        proxy->rng.default_backend = RNG_RANDOM(object_new(TYPE_RNG_RANDOM));

        object_property_add_child(OBJECT(pci_dev),
                                  "default-backend",
                                  OBJECT(proxy->rng.default_backend),
                                  NULL);

        object_property_set_link(OBJECT(pci_dev),
                                 OBJECT(proxy->rng.default_backend),
                                 "rng", NULL);
    }

    vdev = virtio_rng_init(&pci_dev->qdev, &proxy->rng);

    virtio_init_pci(proxy, vdev);
    return 0;
}

static void virtio_rng_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_rng_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}

static Property virtio_blk_properties[] = {
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_BLOCK_PROPERTIES(VirtIOPCIProxy, blk.conf),
    DEFINE_BLOCK_CHS_PROPERTIES(VirtIOPCIProxy, blk.conf),
    DEFINE_PROP_STRING("serial", VirtIOPCIProxy, blk.serial),
    DEFINE_PROP_BIT("scsi", VirtIOPCIProxy, blk.scsi, 0, true),
    DEFINE_PROP_BIT("config-wce", VirtIOPCIProxy, blk.config_wce, 0, true),
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_VIRTIO_BLK_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_END_OF_LIST(),
};
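
/* These properties surface as -device options; a typical (illustrative)
 * invocation would be:
 *   -drive file=disk.img,if=none,id=drive0
 *   -device virtio-blk-pci,drive=drive0,ioeventfd=on,vectors=2
 * "drive" comes from DEFINE_BLOCK_PROPERTIES(); the rest are listed above. */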

static void virtio_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_blk_init_pci;
    k->exit = virtio_blk_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_blk_properties;
}

static TypeInfo virtio_blk_info = {
    .name          = "virtio-blk-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_blk_class_init,
};

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
    DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),
    DEFINE_PROP_UINT32("x-txtimer", VirtIOPCIProxy, net.txtimer, TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIOPCIProxy, net.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIOPCIProxy, net.tx),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_net_init_pci;
    k->exit = virtio_net_exit_pci;
    k->romfile = "pxe-virtio.rom";
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_net_properties;
}

static TypeInfo virtio_net_info = {
    .name          = "virtio-net-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_net_class_init,
};

static Property virtio_serial_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_UINT32("max_ports", VirtIOPCIProxy, serial.max_virtserial_ports, 31),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_serial_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_serial_init_pci;
    k->exit = virtio_serial_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_serial_properties;
}

static TypeInfo virtio_serial_info = {
    .name          = "virtio-serial-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_serial_class_init,
};

static Property virtio_balloon_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_balloon_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_balloon_init_pci;
    k->exit = virtio_balloon_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_balloon_properties;
}

static TypeInfo virtio_balloon_info = {
    .name          = "virtio-balloon-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_balloon_class_init,
};

static void virtio_rng_initfn(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    object_property_add_link(obj, "rng", TYPE_RNG_BACKEND,
                             (Object **)&proxy->rng.rng, NULL);
}

static Property virtio_rng_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    /* Set a default rate limit of 2^47 bytes per minute or roughly 2TB/s.  If
       you have an entropy source capable of generating more entropy than this
       and you can pass it through via virtio-rng, then hats off to you.  Until
       then, this is unlimited for all practical purposes.
    */
    DEFINE_PROP_UINT64("max-bytes", VirtIOPCIProxy, rng.max_bytes, INT64_MAX),
    DEFINE_PROP_UINT32("period", VirtIOPCIProxy, rng.period_ms, 1 << 16),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_rng_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_rng_init_pci;
    k->exit = virtio_rng_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_rng_properties;
}

static TypeInfo virtio_rng_info = {
    .name           = "virtio-rng-pci",
    .parent         = TYPE_PCI_DEVICE,
    .instance_size  = sizeof(VirtIOPCIProxy),
    .instance_init  = virtio_rng_initfn,
    .class_init     = virtio_rng_class_init,
};

static int virtio_scsi_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    vdev = virtio_scsi_init(&pci_dev->qdev, &proxy->scsi);

    vdev->nvectors = proxy->nvectors == DEV_NVECTORS_UNSPECIFIED
                     ? proxy->scsi.num_queues + 3
                     : proxy->nvectors;
    virtio_init_pci(proxy, vdev);

    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static void virtio_scsi_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_scsi_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}

static Property virtio_scsi_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED),
    DEFINE_VIRTIO_SCSI_PROPERTIES(VirtIOPCIProxy, host_features, scsi),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_scsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_scsi_init_pci;
    k->exit = virtio_scsi_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_scsi_properties;
}

static TypeInfo virtio_scsi_info = {
    .name          = "virtio-scsi-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_scsi_class_init,
};

static void virtio_pci_register_types(void)
{
    type_register_static(&virtio_blk_info);
    type_register_static(&virtio_net_info);
    type_register_static(&virtio_serial_info);
    type_register_static(&virtio_balloon_info);
    type_register_static(&virtio_scsi_info);
    type_register_static(&virtio_rng_info);
}

type_init(virtio_pci_register_types)