/*
 * Virtio MMIO bindings
 *
 * Copyright (c) 2011 Linaro Limited
 *
 * Author:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "standard-headers/linux/virtio_mmio.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/host-utils.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio-bus.h"
#include "qemu/error-report.h"
#include "trace.h"
/* QOM macros for the virtio-mmio bus (a VirtioBusState with no extra state) */
#define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus"
#define VIRTIO_MMIO_BUS(obj) \
        OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_GET_CLASS(obj) \
        OBJECT_GET_CLASS(VirtioBusClass, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_CLASS(klass) \
        OBJECT_CLASS_CHECK(VirtioBusClass, (klass), TYPE_VIRTIO_MMIO_BUS)

/* QOM macros for the virtio-mmio proxy device itself */
#define TYPE_VIRTIO_MMIO "virtio-mmio"
#define VIRTIO_MMIO(obj) \
        OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO)

/* Values returned by the MAGIC/VERSION/VENDOR registers (little-endian
 * ASCII 'virt' and 'QEMU'; version 1 == legacy virtio-mmio). */
#define VIRT_MAGIC 0x74726976 /* 'virt' */
#define VIRT_VERSION 1
#define VIRT_VENDOR 0x554D4551 /* 'QEMU' */
58 SysBusDevice parent_obj
;
61 /* Guest accessible state needing migration and reset */
62 uint32_t host_features_sel
;
63 uint32_t guest_features_sel
;
64 uint32_t guest_page_shift
;
67 bool format_transport_address
;
70 static bool virtio_mmio_ioeventfd_enabled(DeviceState
*d
)
72 return kvm_eventfds_enabled();
75 static int virtio_mmio_ioeventfd_assign(DeviceState
*d
,
76 EventNotifier
*notifier
,
79 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(d
);
82 memory_region_add_eventfd(&proxy
->iomem
, VIRTIO_MMIO_QUEUE_NOTIFY
, 4,
85 memory_region_del_eventfd(&proxy
->iomem
, VIRTIO_MMIO_QUEUE_NOTIFY
, 4,
91 static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy
*proxy
)
93 virtio_bus_start_ioeventfd(&proxy
->bus
);
96 static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy
*proxy
)
98 virtio_bus_stop_ioeventfd(&proxy
->bus
);
101 static uint64_t virtio_mmio_read(void *opaque
, hwaddr offset
, unsigned size
)
103 VirtIOMMIOProxy
*proxy
= (VirtIOMMIOProxy
*)opaque
;
104 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
106 trace_virtio_mmio_read(offset
);
109 /* If no backend is present, we treat most registers as
110 * read-as-zero, except for the magic number, version and
111 * vendor ID. This is not strictly sanctioned by the virtio
112 * spec, but it allows us to provide transports with no backend
113 * plugged in which don't confuse Linux's virtio code: the
114 * probe won't complain about the bad magic number, but the
115 * device ID of zero means no backend will claim it.
118 case VIRTIO_MMIO_MAGIC_VALUE
:
120 case VIRTIO_MMIO_VERSION
:
122 case VIRTIO_MMIO_VENDOR_ID
:
129 if (offset
>= VIRTIO_MMIO_CONFIG
) {
130 offset
-= VIRTIO_MMIO_CONFIG
;
133 return virtio_config_readb(vdev
, offset
);
135 return virtio_config_readw(vdev
, offset
);
137 return virtio_config_readl(vdev
, offset
);
143 qemu_log_mask(LOG_GUEST_ERROR
,
144 "%s: wrong size access to register!\n",
149 case VIRTIO_MMIO_MAGIC_VALUE
:
151 case VIRTIO_MMIO_VERSION
:
153 case VIRTIO_MMIO_DEVICE_ID
:
154 return vdev
->device_id
;
155 case VIRTIO_MMIO_VENDOR_ID
:
157 case VIRTIO_MMIO_DEVICE_FEATURES
:
158 if (proxy
->host_features_sel
) {
161 return vdev
->host_features
;
162 case VIRTIO_MMIO_QUEUE_NUM_MAX
:
163 if (!virtio_queue_get_num(vdev
, vdev
->queue_sel
)) {
166 return VIRTQUEUE_MAX_SIZE
;
167 case VIRTIO_MMIO_QUEUE_PFN
:
168 return virtio_queue_get_addr(vdev
, vdev
->queue_sel
)
169 >> proxy
->guest_page_shift
;
170 case VIRTIO_MMIO_INTERRUPT_STATUS
:
171 return atomic_read(&vdev
->isr
);
172 case VIRTIO_MMIO_STATUS
:
174 case VIRTIO_MMIO_DEVICE_FEATURES_SEL
:
175 case VIRTIO_MMIO_DRIVER_FEATURES
:
176 case VIRTIO_MMIO_DRIVER_FEATURES_SEL
:
177 case VIRTIO_MMIO_GUEST_PAGE_SIZE
:
178 case VIRTIO_MMIO_QUEUE_SEL
:
179 case VIRTIO_MMIO_QUEUE_NUM
:
180 case VIRTIO_MMIO_QUEUE_ALIGN
:
181 case VIRTIO_MMIO_QUEUE_NOTIFY
:
182 case VIRTIO_MMIO_INTERRUPT_ACK
:
183 qemu_log_mask(LOG_GUEST_ERROR
,
184 "%s: read of write-only register\n",
188 qemu_log_mask(LOG_GUEST_ERROR
, "%s: bad register offset\n", __func__
);
194 static void virtio_mmio_write(void *opaque
, hwaddr offset
, uint64_t value
,
197 VirtIOMMIOProxy
*proxy
= (VirtIOMMIOProxy
*)opaque
;
198 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
200 trace_virtio_mmio_write_offset(offset
, value
);
203 /* If no backend is present, we just make all registers
204 * write-ignored. This allows us to provide transports with
205 * no backend plugged in.
210 if (offset
>= VIRTIO_MMIO_CONFIG
) {
211 offset
-= VIRTIO_MMIO_CONFIG
;
214 virtio_config_writeb(vdev
, offset
, value
);
217 virtio_config_writew(vdev
, offset
, value
);
220 virtio_config_writel(vdev
, offset
, value
);
228 qemu_log_mask(LOG_GUEST_ERROR
,
229 "%s: wrong size access to register!\n",
234 case VIRTIO_MMIO_DEVICE_FEATURES_SEL
:
235 proxy
->host_features_sel
= value
;
237 case VIRTIO_MMIO_DRIVER_FEATURES
:
238 if (!proxy
->guest_features_sel
) {
239 virtio_set_features(vdev
, value
);
242 case VIRTIO_MMIO_DRIVER_FEATURES_SEL
:
243 proxy
->guest_features_sel
= value
;
245 case VIRTIO_MMIO_GUEST_PAGE_SIZE
:
246 proxy
->guest_page_shift
= ctz32(value
);
247 if (proxy
->guest_page_shift
> 31) {
248 proxy
->guest_page_shift
= 0;
250 trace_virtio_mmio_guest_page(value
, proxy
->guest_page_shift
);
252 case VIRTIO_MMIO_QUEUE_SEL
:
253 if (value
< VIRTIO_QUEUE_MAX
) {
254 vdev
->queue_sel
= value
;
257 case VIRTIO_MMIO_QUEUE_NUM
:
258 trace_virtio_mmio_queue_write(value
, VIRTQUEUE_MAX_SIZE
);
259 virtio_queue_set_num(vdev
, vdev
->queue_sel
, value
);
260 /* Note: only call this function for legacy devices */
261 virtio_queue_update_rings(vdev
, vdev
->queue_sel
);
263 case VIRTIO_MMIO_QUEUE_ALIGN
:
264 /* Note: this is only valid for legacy devices */
265 virtio_queue_set_align(vdev
, vdev
->queue_sel
, value
);
267 case VIRTIO_MMIO_QUEUE_PFN
:
271 virtio_queue_set_addr(vdev
, vdev
->queue_sel
,
272 value
<< proxy
->guest_page_shift
);
275 case VIRTIO_MMIO_QUEUE_NOTIFY
:
276 if (value
< VIRTIO_QUEUE_MAX
) {
277 virtio_queue_notify(vdev
, value
);
280 case VIRTIO_MMIO_INTERRUPT_ACK
:
281 atomic_and(&vdev
->isr
, ~value
);
282 virtio_update_irq(vdev
);
284 case VIRTIO_MMIO_STATUS
:
285 if (!(value
& VIRTIO_CONFIG_S_DRIVER_OK
)) {
286 virtio_mmio_stop_ioeventfd(proxy
);
289 virtio_set_status(vdev
, value
& 0xff);
291 if (value
& VIRTIO_CONFIG_S_DRIVER_OK
) {
292 virtio_mmio_start_ioeventfd(proxy
);
295 if (vdev
->status
== 0) {
299 case VIRTIO_MMIO_MAGIC_VALUE
:
300 case VIRTIO_MMIO_VERSION
:
301 case VIRTIO_MMIO_DEVICE_ID
:
302 case VIRTIO_MMIO_VENDOR_ID
:
303 case VIRTIO_MMIO_DEVICE_FEATURES
:
304 case VIRTIO_MMIO_QUEUE_NUM_MAX
:
305 case VIRTIO_MMIO_INTERRUPT_STATUS
:
306 qemu_log_mask(LOG_GUEST_ERROR
,
307 "%s: write to readonly register\n",
312 qemu_log_mask(LOG_GUEST_ERROR
, "%s: bad register offset\n", __func__
);
316 static const MemoryRegionOps virtio_mem_ops
= {
317 .read
= virtio_mmio_read
,
318 .write
= virtio_mmio_write
,
319 .endianness
= DEVICE_NATIVE_ENDIAN
,
322 static void virtio_mmio_update_irq(DeviceState
*opaque
, uint16_t vector
)
324 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(opaque
);
325 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
331 level
= (atomic_read(&vdev
->isr
) != 0);
332 trace_virtio_mmio_setting_irq(level
);
333 qemu_set_irq(proxy
->irq
, level
);
336 static int virtio_mmio_load_config(DeviceState
*opaque
, QEMUFile
*f
)
338 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(opaque
);
340 proxy
->host_features_sel
= qemu_get_be32(f
);
341 proxy
->guest_features_sel
= qemu_get_be32(f
);
342 proxy
->guest_page_shift
= qemu_get_be32(f
);
346 static void virtio_mmio_save_config(DeviceState
*opaque
, QEMUFile
*f
)
348 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(opaque
);
350 qemu_put_be32(f
, proxy
->host_features_sel
);
351 qemu_put_be32(f
, proxy
->guest_features_sel
);
352 qemu_put_be32(f
, proxy
->guest_page_shift
);
355 static void virtio_mmio_reset(DeviceState
*d
)
357 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(d
);
359 virtio_mmio_stop_ioeventfd(proxy
);
360 virtio_bus_reset(&proxy
->bus
);
361 proxy
->host_features_sel
= 0;
362 proxy
->guest_features_sel
= 0;
363 proxy
->guest_page_shift
= 0;
366 static int virtio_mmio_set_guest_notifier(DeviceState
*d
, int n
, bool assign
,
369 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(d
);
370 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
371 VirtioDeviceClass
*vdc
= VIRTIO_DEVICE_GET_CLASS(vdev
);
372 VirtQueue
*vq
= virtio_get_queue(vdev
, n
);
373 EventNotifier
*notifier
= virtio_queue_get_guest_notifier(vq
);
376 int r
= event_notifier_init(notifier
, 0);
380 virtio_queue_set_guest_notifier_fd_handler(vq
, true, with_irqfd
);
382 virtio_queue_set_guest_notifier_fd_handler(vq
, false, with_irqfd
);
383 event_notifier_cleanup(notifier
);
386 if (vdc
->guest_notifier_mask
&& vdev
->use_guest_notifier_mask
) {
387 vdc
->guest_notifier_mask(vdev
, n
, !assign
);
393 static int virtio_mmio_set_guest_notifiers(DeviceState
*d
, int nvqs
,
396 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(d
);
397 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
398 /* TODO: need to check if kvm-arm supports irqfd */
399 bool with_irqfd
= false;
402 nvqs
= MIN(nvqs
, VIRTIO_QUEUE_MAX
);
404 for (n
= 0; n
< nvqs
; n
++) {
405 if (!virtio_queue_get_num(vdev
, n
)) {
409 r
= virtio_mmio_set_guest_notifier(d
, n
, assign
, with_irqfd
);
418 /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
421 virtio_mmio_set_guest_notifier(d
, n
, !assign
, false);
426 /* virtio-mmio device */
428 static Property virtio_mmio_properties
[] = {
429 DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy
,
430 format_transport_address
, true),
431 DEFINE_PROP_END_OF_LIST(),
434 static void virtio_mmio_realizefn(DeviceState
*d
, Error
**errp
)
436 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(d
);
437 SysBusDevice
*sbd
= SYS_BUS_DEVICE(d
);
439 qbus_create_inplace(&proxy
->bus
, sizeof(proxy
->bus
), TYPE_VIRTIO_MMIO_BUS
,
441 sysbus_init_irq(sbd
, &proxy
->irq
);
442 memory_region_init_io(&proxy
->iomem
, OBJECT(d
), &virtio_mem_ops
, proxy
,
443 TYPE_VIRTIO_MMIO
, 0x200);
444 sysbus_init_mmio(sbd
, &proxy
->iomem
);
447 static void virtio_mmio_class_init(ObjectClass
*klass
, void *data
)
449 DeviceClass
*dc
= DEVICE_CLASS(klass
);
451 dc
->realize
= virtio_mmio_realizefn
;
452 dc
->reset
= virtio_mmio_reset
;
453 set_bit(DEVICE_CATEGORY_MISC
, dc
->categories
);
454 dc
->props
= virtio_mmio_properties
;
457 static const TypeInfo virtio_mmio_info
= {
458 .name
= TYPE_VIRTIO_MMIO
,
459 .parent
= TYPE_SYS_BUS_DEVICE
,
460 .instance_size
= sizeof(VirtIOMMIOProxy
),
461 .class_init
= virtio_mmio_class_init
,
464 /* virtio-mmio-bus. */
466 static char *virtio_mmio_bus_get_dev_path(DeviceState
*dev
)
468 BusState
*virtio_mmio_bus
;
469 VirtIOMMIOProxy
*virtio_mmio_proxy
;
471 SysBusDevice
*proxy_sbd
;
474 virtio_mmio_bus
= qdev_get_parent_bus(dev
);
475 virtio_mmio_proxy
= VIRTIO_MMIO(virtio_mmio_bus
->parent
);
476 proxy_path
= qdev_get_dev_path(DEVICE(virtio_mmio_proxy
));
479 * If @format_transport_address is false, then we just perform the same as
480 * virtio_bus_get_dev_path(): we delegate the address formatting for the
481 * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
482 * (i.e., the device that implements the virtio-mmio bus) resides on. In
483 * this case the base address of the virtio-mmio transport will be
486 if (!virtio_mmio_proxy
->format_transport_address
) {
490 /* Otherwise, we append the base address of the transport. */
491 proxy_sbd
= SYS_BUS_DEVICE(virtio_mmio_proxy
);
492 assert(proxy_sbd
->num_mmio
== 1);
493 assert(proxy_sbd
->mmio
[0].memory
== &virtio_mmio_proxy
->iomem
);
496 path
= g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx
, proxy_path
,
497 proxy_sbd
->mmio
[0].addr
);
499 path
= g_strdup_printf("virtio-mmio@" TARGET_FMT_plx
,
500 proxy_sbd
->mmio
[0].addr
);
506 static void virtio_mmio_bus_class_init(ObjectClass
*klass
, void *data
)
508 BusClass
*bus_class
= BUS_CLASS(klass
);
509 VirtioBusClass
*k
= VIRTIO_BUS_CLASS(klass
);
511 k
->notify
= virtio_mmio_update_irq
;
512 k
->save_config
= virtio_mmio_save_config
;
513 k
->load_config
= virtio_mmio_load_config
;
514 k
->set_guest_notifiers
= virtio_mmio_set_guest_notifiers
;
515 k
->ioeventfd_enabled
= virtio_mmio_ioeventfd_enabled
;
516 k
->ioeventfd_assign
= virtio_mmio_ioeventfd_assign
;
517 k
->has_variable_vring_alignment
= true;
518 bus_class
->max_dev
= 1;
519 bus_class
->get_dev_path
= virtio_mmio_bus_get_dev_path
;
522 static const TypeInfo virtio_mmio_bus_info
= {
523 .name
= TYPE_VIRTIO_MMIO_BUS
,
524 .parent
= TYPE_VIRTIO_BUS
,
525 .instance_size
= sizeof(VirtioBusState
),
526 .class_init
= virtio_mmio_bus_class_init
,
529 static void virtio_mmio_register_types(void)
531 type_register_static(&virtio_mmio_bus_info
);
532 type_register_static(&virtio_mmio_info
);
535 type_init(virtio_mmio_register_types
)