/*
 * Virtio MMIO bindings
 *
 * Copyright (c) 2011 Linaro Limited
 *
 * Author:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "standard-headers/linux/virtio_mmio.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "sysemu/kvm.h"
#include "sysemu/replay.h"
#include "hw/virtio/virtio-mmio.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "trace.h"
static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    return (proxy->flags & VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD) != 0;
}
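
/*
 * Bind (or unbind) the host-side notifier for queue n: once assigned,
 * a 4-byte guest write of n to the QUEUE_NOTIFY register is turned
 * into an eventfd signal by the memory API, so the queue can be
 * serviced without a trip through this file's MMIO write handler.
 */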
static int virtio_mmio_ioeventfd_assign(DeviceState *d,
                                        EventNotifier *notifier,
                                        int n, bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    if (assign) {
        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    }
    return 0;
}
static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}
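
/*
 * Reset the backend device and, in non-legacy mode, mark every queue
 * as disabled again. Used for a zero write to the legacy QUEUE_PFN
 * register, for a zero status write, and by the full transport reset.
 */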
static void virtio_mmio_soft_reset(VirtIOMMIOProxy *proxy)
{
    int i;

    virtio_bus_reset(&proxy->bus);

    if (!proxy->legacy) {
        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            proxy->vqs[i].enabled = 0;
        }
    }
}
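
/*
 * MMIO read dispatch. Offsets at or above VIRTIO_MMIO_CONFIG fall into
 * the device-specific configuration window and allow 1/2/4-byte
 * accesses; everything below it is a transport register and must be
 * read with a 4-byte access.
 */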
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_read(offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC_VALUE:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            if (proxy->legacy) {
                return VIRT_VERSION_LEGACY;
            } else {
                return VIRT_VERSION;
            }
        case VIRTIO_MMIO_VENDOR_ID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        if (proxy->legacy) {
            switch (size) {
            case 1:
                return virtio_config_readb(vdev, offset);
            case 2:
                return virtio_config_readw(vdev, offset);
            case 4:
                return virtio_config_readl(vdev, offset);
            default:
                abort();
            }
        }

        switch (size) {
        case 1:
            return virtio_config_modern_readb(vdev, offset);
        case 2:
            return virtio_config_modern_readw(vdev, offset);
        case 4:
            return virtio_config_modern_readl(vdev, offset);
        default:
            abort();
        }
    }

    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return 0;
    }

    switch (offset) {
    case VIRTIO_MMIO_MAGIC_VALUE:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        if (proxy->legacy) {
            return VIRT_VERSION_LEGACY;
        } else {
            return VIRT_VERSION;
        }
    case VIRTIO_MMIO_DEVICE_ID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDOR_ID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_DEVICE_FEATURES:
        if (proxy->legacy) {
            if (proxy->host_features_sel) {
                return 0;
            } else {
                return vdev->host_features;
            }
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
            return (vdev->host_features & ~vdc->legacy_features)
                >> (32 * proxy->host_features_sel);
        }
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
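    /*
     * Legacy ring addressing: the guest programs its page size via
     * GUEST_PAGE_SIZE and then exchanges ring addresses as page frame
     * numbers. E.g. with 4 KiB pages (guest_page_shift == 12), a PFN
     * of 0x12345 denotes guest physical address 0x12345000.
     */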
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return proxy->vqs[vdev->queue_sel].enabled;
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        return qatomic_read(&vdev->isr);
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_CONFIG_GENERATION:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return vdev->generation;
    case VIRTIO_MMIO_SHM_LEN_LOW:
    case VIRTIO_MMIO_SHM_LEN_HIGH:
        /*
         * VIRTIO_MMIO_SHM_SEL is unimplemented;
         * according to the Linux driver, if the region length is -1
         * the shared memory doesn't exist.
         */
        return -1;
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
    case VIRTIO_MMIO_DRIVER_FEATURES:
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
    case VIRTIO_MMIO_QUEUE_SEL:
    case VIRTIO_MMIO_QUEUE_NUM:
    case VIRTIO_MMIO_QUEUE_ALIGN:
    case VIRTIO_MMIO_QUEUE_NOTIFY:
    case VIRTIO_MMIO_INTERRUPT_ACK:
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
    case VIRTIO_MMIO_QUEUE_USED_LOW:
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: read of write-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    }
}
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_write_offset(offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        if (proxy->legacy) {
            switch (size) {
            case 1:
                virtio_config_writeb(vdev, offset, value);
                break;
            case 2:
                virtio_config_writew(vdev, offset, value);
                break;
            case 4:
                virtio_config_writel(vdev, offset, value);
                break;
            default:
                abort();
            }
            return;
        }

        switch (size) {
        case 1:
            virtio_config_modern_writeb(vdev, offset, value);
            break;
        case 2:
            virtio_config_modern_writew(vdev, offset, value);
            break;
        case 4:
            virtio_config_modern_writel(vdev, offset, value);
            break;
        default:
            abort();
        }
        return;
    }

    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return;
    }

    switch (offset) {
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
        if (value) {
            proxy->host_features_sel = 1;
        } else {
            proxy->host_features_sel = 0;
        }
        break;
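    /*
     * Feature bits are exchanged as two 32-bit banks selected by the
     * *_FEATURES_SEL registers: bank 0 holds bits 0-31, bank 1 holds
     * bits 32-63 (e.g. VIRTIO_F_VERSION_1, bit 32, lives in bank 1).
     * In legacy mode only bank 0 is meaningful.
     */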
    case VIRTIO_MMIO_DRIVER_FEATURES:
        if (proxy->legacy) {
            if (proxy->guest_features_sel) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: attempt to write guest features with "
                              "guest_features_sel > 0 in legacy mode\n",
                              __func__);
            } else {
                virtio_set_features(vdev, value);
            }
        } else {
            proxy->guest_features[proxy->guest_features_sel] = value;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
        if (value) {
            proxy->guest_features_sel = 1;
        } else {
            proxy->guest_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        trace_virtio_mmio_guest_page(value, proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUE_SEL:
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NUM:
        trace_virtio_mmio_queue_write(value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);

        if (proxy->legacy) {
            virtio_queue_update_rings(vdev, vdev->queue_sel);
        } else {
            virtio_init_region_cache(vdev, vdev->queue_sel);
            proxy->vqs[vdev->queue_sel].num = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_ALIGN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value == 0) {
            virtio_mmio_soft_reset(proxy);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
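    /*
     * Modern queue setup: the driver first stores the 64-bit
     * descriptor, avail and used ring addresses through the
     * *_LOW/*_HIGH register pairs below, then writes 1 to QUEUE_READY,
     * at which point the accumulated halves are combined and handed to
     * the backend.
     */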
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value) {
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                proxy->vqs[vdev->queue_sel].desc[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                proxy->vqs[vdev->queue_sel].avail[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
        } else {
            proxy->vqs[vdev->queue_sel].enabled = 0;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NOTIFY:
    {
        uint16_t vq_idx = value & 0xFFFF;

        if (vq_idx < VIRTIO_QUEUE_MAX && virtio_queue_get_num(vdev, vq_idx)) {
            if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFICATION_DATA)) {
                VirtQueue *vq = virtio_get_queue(vdev, vq_idx);

                /* The upper half carries the driver's avail index. */
                virtio_queue_set_shadow_avail_idx(vq, (value >> 16) & 0xFFFF);
            }
            virtio_queue_notify(vdev, vq_idx);
        }
        break;
    }
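    /*
     * INTERRUPT_ACK is write-one-to-clear: the value written is a mask
     * of ISR bits the driver has handled, so e.g. writing 0x1 clears
     * the "used buffer" bit while leaving a pending config-change bit
     * set.
     */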
    case VIRTIO_MMIO_INTERRUPT_ACK:
        qatomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        if (!proxy->legacy && (value & VIRTIO_CONFIG_S_FEATURES_OK)) {
            virtio_set_features(vdev,
                                ((uint64_t)proxy->guest_features[1]) << 32 |
                                proxy->guest_features[0]);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_mmio_soft_reset(proxy);
        }
        break;
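    /*
     * The six registers below stage the 64-bit ring addresses one
     * 32-bit half at a time; e.g. a descriptor table at 0x1_2000_0000
     * is programmed as DESC_LOW = 0x20000000, DESC_HIGH = 0x1. Nothing
     * takes effect until QUEUE_READY is written.
     */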
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[1] = value;
        break;
    case VIRTIO_MMIO_MAGIC_VALUE:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICE_ID:
    case VIRTIO_MMIO_VENDOR_ID:
    case VIRTIO_MMIO_DEVICE_FEATURES:
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
    case VIRTIO_MMIO_INTERRUPT_STATUS:
    case VIRTIO_MMIO_CONFIG_GENERATION:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: write to read-only register (0x%" HWADDR_PRIx
                      ")\n", __func__, offset);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
    }
}
static const MemoryRegionOps virtio_legacy_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int level;

    if (!vdev) {
        return;
    }
    level = (qatomic_read(&vdev->isr) != 0);
    trace_virtio_mmio_setting_irq(level);
    qemu_set_irq(proxy->irq, level);
}
static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}
static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}
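
/*
 * Extra (non-legacy) transport state is wrapped in a subsection of an
 * otherwise empty "virtio_mmio" VMStateDescription; the virtio core
 * only asks for it when has_extra_state below reports true, which
 * keeps legacy-mode migration streams unchanged.
 */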
static const VMStateDescription vmstate_virtio_mmio_queue_state = {
    .name = "virtio_mmio/queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOMMIOQueue),
        VMSTATE_BOOL(enabled, VirtIOMMIOQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOMMIOQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_mmio_state_sub = {
    .name = "virtio_mmio/state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOMMIOProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOMMIOProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_mmio_queue_state,
                             VirtIOMMIOQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_mmio = {
    .name = "virtio_mmio",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_virtio_mmio_state_sub,
        NULL
    }
};
static void virtio_mmio_save_extra_state(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    vmstate_save_state(f, &vmstate_virtio_mmio, proxy, NULL);
}

static int virtio_mmio_load_extra_state(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    return vmstate_load_state(f, &vmstate_virtio_mmio, proxy, 1);
}

static bool virtio_mmio_has_extra_state(DeviceState *opaque)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    return !proxy->legacy;
}
static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    int i;

    virtio_mmio_soft_reset(proxy);

    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;

    if (!proxy->legacy) {
        proxy->guest_features[0] = proxy->guest_features[1] = 0;

        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            proxy->vqs[i].num = 0;
            proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
            proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
            proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
        }
    }
}
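
/*
 * Guest notifiers replace the direct virtio_notify() path with an
 * EventNotifier per queue; with_irqfd would let KVM inject the
 * interrupt straight from the eventfd without returning to userspace,
 * but it is currently always false here (see the TODO in
 * virtio_mmio_set_guest_notifiers below).
 */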
static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}
static int virtio_mmio_set_config_guest_notifier(DeviceState *d, bool assign,
                                                 bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    EventNotifier *notifier = virtio_config_get_guest_notifier(vdev);
    int r = 0;

    if (assign) {
        r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_config_set_guest_notifier_fd_handler(vdev, assign, with_irqfd);
    } else {
        virtio_config_set_guest_notifier_fd_handler(vdev, assign, with_irqfd);
        event_notifier_cleanup(notifier);
    }
    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, VIRTIO_CONFIG_IRQ_IDX, !assign);
    }
    return r;
}
static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    r = virtio_mmio_set_config_guest_notifier(d, assign, with_irqfd);
    if (r < 0) {
        goto assign_error;
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}
static void virtio_mmio_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (!proxy->legacy) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}

/* virtio-mmio device */
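
/*
 * "force-legacy" (default true) keeps the version 1 register layout
 * for compatibility with older guests; modern (virtio 1.0+) transports
 * can be requested globally, e.g. (illustrative command line):
 *
 *   qemu-system-aarch64 -M virt -global virtio-mmio.force-legacy=false \
 *       -device virtio-blk-device,drive=hd0 ...
 *
 * "format_transport_address" controls whether the transport's base
 * address appears in migration device paths (see
 * virtio_mmio_bus_get_dev_path below).
 */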
static Property virtio_mmio_properties[] = {
    DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
                     format_transport_address, true),
    DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy, legacy, true),
    DEFINE_PROP_BIT("ioeventfd", VirtIOMMIOProxy, flags,
                    VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_init(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS, d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);

    /* fd-based ioevents can't be synchronized in record/replay */
    if (replay_mode != REPLAY_MODE_NONE) {
        proxy->flags &= ~VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD;
    }
    if (proxy->legacy) {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_legacy_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    } else {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    }
    sysbus_init_mmio(sbd, &proxy->iomem);
}
static void virtio_mmio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_mmio_realizefn;
    device_class_set_legacy_reset(dc, virtio_mmio_reset);
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    device_class_set_props(dc, virtio_mmio_properties);
}

static const TypeInfo virtio_mmio_info = {
    .name          = TYPE_VIRTIO_MMIO,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init    = virtio_mmio_class_init,
};
/* virtio-mmio-bus. */

static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
{
    BusState *virtio_mmio_bus;
    VirtIOMMIOProxy *virtio_mmio_proxy;
    char *proxy_path;
    char *path;
    MemoryRegionSection section;

    virtio_mmio_bus = qdev_get_parent_bus(dev);
    virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent);
    proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy));

    /*
     * If @format_transport_address is false, then we just perform the same as
     * virtio_bus_get_dev_path(): we delegate the address formatting for the
     * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
     * (i.e., the device that implements the virtio-mmio bus) resides on. In
     * this case the base address of the virtio-mmio transport will be
     * invisible.
     */
    if (!virtio_mmio_proxy->format_transport_address) {
        return proxy_path;
    }

    /* Otherwise, we append the base address of the transport. */
    section = memory_region_find(&virtio_mmio_proxy->iomem, 0, 0x200);
    assert(section.mr);

    if (proxy_path) {
        path = g_strdup_printf("%s/virtio-mmio@" HWADDR_FMT_plx, proxy_path,
                               section.offset_within_address_space);
    } else {
        path = g_strdup_printf("virtio-mmio@" HWADDR_FMT_plx,
                               section.offset_within_address_space);
    }
    memory_region_unref(section.mr);

    g_free(proxy_path);
    return path;
}
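
/*
 * With format_transport_address enabled this yields paths such as
 * ".../virtio-mmio@000000000a000000" (address purely illustrative),
 * which keeps device paths stable and unique across multiple
 * virtio-mmio transports for migration.
 */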
static void virtio_mmio_vmstate_change(DeviceState *d, bool running)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    if (running) {
        virtio_mmio_start_ioeventfd(proxy);
    } else {
        virtio_mmio_stop_ioeventfd(proxy);
    }
}
static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    k->notify = virtio_mmio_update_irq;
    k->save_config = virtio_mmio_save_config;
    k->load_config = virtio_mmio_load_config;
    k->save_extra_state = virtio_mmio_save_extra_state;
    k->load_extra_state = virtio_mmio_load_extra_state;
    k->has_extra_state = virtio_mmio_has_extra_state;
    k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
    k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
    k->pre_plugged = virtio_mmio_pre_plugged;
    k->vmstate_change = virtio_mmio_vmstate_change;
    k->has_variable_vring_alignment = true;
    bus_class->max_dev = 1;
    bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
}
static const TypeInfo virtio_mmio_bus_info = {
    .name          = TYPE_VIRTIO_MMIO_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init    = virtio_mmio_bus_class_init,
};

static void virtio_mmio_register_types(void)
{
    type_register_static(&virtio_mmio_bus_info);
    type_register_static(&virtio_mmio_info);
}

type_init(virtio_mmio_register_types)