/*
 * Virtio memory mapped device driver
 *
 * Copyright 2011-2014, ARM Ltd.
 *
 * This module allows virtio devices to be used over a virtual, memory mapped
 * platform device.
 *
 * The guest device(s) may be instantiated in one of three equivalent ways:
 *
 * 1. Static platform device in board's code, eg.:
 *
 *	static struct platform_device v2m_virtio_device = {
 *		.name = "virtio-mmio",
 *		.resource = (struct resource []) {
 *			{
 *				.start = 0x1001e000,
 *				.flags = IORESOURCE_MEM,
 *			}, {
 *				.flags = IORESOURCE_IRQ,
 *			},
 *		}
 *	};
 *
 * 2. Device Tree node, eg.:
 *
 *	virtio_block@1e000 {
 *		compatible = "virtio,mmio";
 *		reg = <0x1e000 0x100>;
 *		interrupts = <42>;
 *	}
 *
 * 3. Kernel module (or command line) parameter. Can be used more than once -
 *    one device will be created for each one. Syntax:
 *
 *		[virtio_mmio.]device=<size>@<baseaddr>:<irq>[:<id>]
 *    where:
 *		<size>     := size (can use standard suffixes like K, M or G)
 *		<baseaddr> := physical base address
 *		<irq>      := interrupt number (as passed to request_irq())
 *		<id>       := (optional) platform device id
 *    eg.:
 *		virtio_mmio.device=0x100@0x100b0000:48 \
 *				virtio_mmio.device=1K@0x1001e000:74
 *
 * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#define pr_fmt(fmt) "virtio-mmio: " fmt

#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_mmio.h>
#include <linux/virtio_ring.h>

/* The alignment to use between consumer and producer parts of vring.
 * Currently hardcoded to the page size. */
#define VIRTIO_MMIO_VRING_ALIGN		PAGE_SIZE

#define to_virtio_mmio_device(_plat_dev) \
	container_of(_plat_dev, struct virtio_mmio_device, vdev)

struct virtio_mmio_device {
	struct virtio_device vdev;
	struct platform_device *pdev;

	void __iomem *base;
	unsigned long version;

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;
	struct list_head virtqueues;
};

struct virtio_mmio_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the number of entries in the queue */
	unsigned int num;

	/* the virtual address of the ring queue */
	void *queue;

	/* the list node for the virtqueues list */
	struct list_head node;
};

/* Configuration interface */

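/*
 * Device features are exposed as two 32-bit words: writing 1 or 0 to
 * DEVICE_FEATURES_SEL selects the high or low word, which is then read
 * back from DEVICE_FEATURES and combined into a 64-bit feature mask.
 */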
static u64 vm_get_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u64 features;

	writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
	features <<= 32;

	writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);

	return features;
}

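/*
 * Mirror image of vm_get_features(): the features accepted by the driver
 * are written back 32 bits at a time via DRIVER_FEATURES_SEL and
 * DRIVER_FEATURES. Version 2 (modern) devices must additionally have
 * VIRTIO_F_VERSION_1 negotiated.
 */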
static int vm_finalize_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Make sure there are no mixed devices */
	if (vm_dev->version == 2 &&
			!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
		return -EINVAL;
	}

	writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)(vdev->features >> 32),
			vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)vdev->features,
			vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	return 0;
}

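/*
 * Config space accessors. Legacy (version 1) devices are accessed
 * byte-by-byte in guest byte order; version 2 devices use accesses
 * matching the field width, with values in little-endian byte order.
 */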
static void vm_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			ptr[i] = readb(base + offset + i);
		return;
	}

	switch (len) {
	case 1:
		b = readb(base + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(readw(base + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(base + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

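/* Write side of the config space accessors; see vm_get() above. */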
static void vm_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		const u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			writeb(ptr[i], base + offset + i);

		return;
	}

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		writeb(b, base + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		writew(le16_to_cpu(w), base + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		writel(le32_to_cpu(l), base + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

static u32 vm_generation(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	if (vm_dev->version == 1)
		return 0;
	else
		return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
}

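/*
 * The status register holds the usual virtio device status bits
 * (ACKNOWLEDGE, DRIVER, DRIVER_OK, ...); only the low byte is meaningful.
 */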
static u8 vm_get_status(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;
}

static void vm_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);

	writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
}

static void vm_reset(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* 0 status means a reset. */
	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
}

/* Transport interface */

/* the notify function used when creating a virt queue */
static bool vm_notify(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);

	/* We write the queue's selector into the notification register to
	 * signal the other end */
	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
	return true;
}

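/*
 * A single interrupt line is shared by configuration changes and all
 * virtqueues. INTERRUPT_STATUS says which of the two sources fired; it is
 * acknowledged by writing the same mask to INTERRUPT_ACK, and every known
 * virtqueue is then polled via vring_interrupt().
 */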
/* Notify all virtqueues on an interrupt. */
static irqreturn_t vm_interrupt(int irq, void *opaque)
{
	struct virtio_mmio_device *vm_dev = opaque;
	struct virtio_mmio_vq_info *info;
	unsigned long status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	/* Read and acknowledge interrupts */
	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);

	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)) {
		virtio_config_changed(&vm_dev->vdev);
		ret = IRQ_HANDLED;
	}

	if (likely(status & VIRTIO_MMIO_INT_VRING)) {
		spin_lock_irqsave(&vm_dev->lock, flags);
		list_for_each_entry(info, &vm_dev->virtqueues, node)
			ret |= vring_interrupt(irq, info->vq);
		spin_unlock_irqrestore(&vm_dev->lock, flags);
	}

	return ret;
}

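/*
 * Tear down a single virtqueue: unlink it from the IRQ dispatch list,
 * destroy the vring, tell the device the queue is gone (PFN = 0 for legacy
 * devices, READY = 0 for modern ones) and free the ring pages.
 */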
static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags, size;
	unsigned int index = vq->index;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	vring_del_virtqueue(vq);

	/* Select and deactivate the queue */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}

	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
	free_pages_exact(info->queue, size);

	kfree(info);
}

static void vm_del_vqs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vm_del_vq(vq);

	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}

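/*
 * Queue setup, roughly:
 *
 *	writel(index, base + VIRTIO_MMIO_QUEUE_SEL);
 *	num = readl(base + VIRTIO_MMIO_QUEUE_NUM_MAX);
 *	... allocate the ring, halving num until the allocation succeeds ...
 *	writel(num, base + VIRTIO_MMIO_QUEUE_NUM);
 *	... then either QUEUE_PFN (version 1) or the DESC/AVAIL/USED
 *	    addresses followed by QUEUE_READY (version 2) ...
 */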
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtio_mmio_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags, size;
	int err;

	/* Select the queue we're interested in */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up. */
	if (readl(vm_dev->base + (vm_dev->version == 1 ?
			VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
		err = -ENOENT;
		goto error_available;
	}

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto error_kmalloc;
	}

	/* Allocate pages for the queue - start with a queue as big as
	 * possible (limited by maximum size allowed by device), drop down
	 * to a minimal size, just big enough to fit descriptor table
	 * and two rings (which makes it "alignment_size * 2")
	 */
	info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);

	/* If the device reports a 0 entry queue, we won't be able to
	 * use it to perform I/O, and vring_new_virtqueue() can't create
	 * empty queues anyway, so don't bother to set up the device.
	 */
	if (info->num == 0) {
		err = -ENOENT;
		goto error_alloc_pages;
	}

	while (1) {
		size = PAGE_ALIGN(vring_size(info->num,
				VIRTIO_MMIO_VRING_ALIGN));
		/* Did the last iteration shrink the queue below minimum size? */
		if (size < VIRTIO_MMIO_VRING_ALIGN * 2) {
			err = -ENOMEM;
			goto error_alloc_pages;
		}

		info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
		if (info->queue)
			break;

		info->num /= 2;
	}

	/* Create the vring */
	vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
				 true, info->queue, vm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	/* Activate the queue */
	writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	if (vm_dev->version == 1) {
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
		writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
				vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		u64 addr;

		addr = virt_to_phys(info->queue);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

		addr = virt_to_phys(virtqueue_get_avail(vq));
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

		addr = virt_to_phys(virtqueue_get_used(vq));
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);

		writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
	}

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	return vq;

error_new_virtqueue:
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}
	free_pages_exact(info->queue, size);
error_alloc_pages:
	kfree(info);
error_kmalloc:
error_available:
	return ERR_PTR(err);
}

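/*
 * All queues share the platform device's single IRQ, so it is requested
 * once (IRQF_SHARED) and each queue is then set up in turn.
 */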
static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[],
		       vq_callback_t *callbacks[],
		       const char *names[])
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
	int i, err;

	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
			dev_name(&vdev->dev), vm_dev);
	if (err)
		return err;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]);
		if (IS_ERR(vqs[i])) {
			vm_del_vqs(vdev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}

static const char *vm_bus_name(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return vm_dev->pdev->name;
}

static const struct virtio_config_ops virtio_mmio_config_ops = {
	.get		= vm_get,
	.set		= vm_set,
	.generation	= vm_generation,
	.get_status	= vm_get_status,
	.set_status	= vm_set_status,
	.reset		= vm_reset,
	.find_vqs	= vm_find_vqs,
	.del_vqs	= vm_del_vqs,
	.get_features	= vm_get_features,
	.finalize_features = vm_finalize_features,
	.bus_name	= vm_bus_name,
};

/* Platform device */

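/*
 * Probe sequence: map the register window, verify the "virt" magic value
 * and the device version (1 or 2), read the device/vendor IDs, and for
 * legacy devices program the guest page size before registering the
 * virtio device.
 */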
static int virtio_mmio_probe(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev;
	struct resource *mem;
	unsigned long magic;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, mem->start,
			resource_size(mem), pdev->name))
		return -EBUSY;

	vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return -ENOMEM;

	vm_dev->vdev.dev.parent = &pdev->dev;
	vm_dev->vdev.config = &virtio_mmio_config_ops;
	vm_dev->pdev = pdev;
	INIT_LIST_HEAD(&vm_dev->virtqueues);
	spin_lock_init(&vm_dev->lock);

	vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
	if (vm_dev->base == NULL)
		return -EFAULT;

	/* Check magic value */
	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
		return -ENODEV;
	}

	/* Check device version */
	vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
	if (vm_dev->version < 1 || vm_dev->version > 2) {
		dev_err(&pdev->dev, "Version %ld not supported!\n",
				vm_dev->version);
		return -ENXIO;
	}

	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
	if (vm_dev->vdev.id.device == 0) {
		/*
		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
		 * with no function. End probing now with no error reported.
		 */
		return -ENODEV;
	}
	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

	/* Reject legacy-only IDs for version 2 devices */
	if (vm_dev->version == 2 &&
			virtio_device_is_legacy_only(vm_dev->vdev.id)) {
		dev_err(&pdev->dev, "Version 2 not supported for devices %u!\n",
				vm_dev->vdev.id.device);
		return -ENODEV;
	}

	if (vm_dev->version == 1)
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

	platform_set_drvdata(pdev, vm_dev);

	return register_virtio_device(&vm_dev->vdev);
}

static int virtio_mmio_remove(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vm_dev->vdev);

	return 0;
}

/* Devices list parameter */

#if defined(CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES)

static struct device vm_cmdline_parent = {
	.init_name = "virtio-mmio-cmdline",
};

static int vm_cmdline_parent_registered;
static int vm_cmdline_id;

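/*
 * Example: "virtio_mmio.device=1K@0x1001e000:74" (as in the header comment)
 * parses to size = 0x400, base = 0x1001e000, irq = 74 and no explicit
 * platform device id, and results in a "virtio-mmio" platform device with
 * one MEM and one IRQ resource.
 */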
static int vm_cmdline_set(const char *device,
		const struct kernel_param *kp)
{
	int err;
	struct resource resources[2] = {};
	char *str;
	long long int base, size;
	unsigned int irq;
	int processed, consumed = 0;
	struct platform_device *pdev;

	/* Consume "size" part of the command line parameter */
	size = memparse(device, &str);

	/* Get "@<base>:<irq>[:<id>]" chunks */
	processed = sscanf(str, "@%lli:%u%n:%d%n",
			&base, &irq, &consumed,
			&vm_cmdline_id, &consumed);

	/*
	 * sscanf() must process at least 2 chunks; also there
	 * must be no extra characters after the last chunk, so
	 * str[consumed] must be '\0'
	 */
	if (processed < 2 || str[consumed])
		return -EINVAL;

	resources[0].flags = IORESOURCE_MEM;
	resources[0].start = base;
	resources[0].end = base + size - 1;

	resources[1].flags = IORESOURCE_IRQ;
	resources[1].start = resources[1].end = irq;

	if (!vm_cmdline_parent_registered) {
		err = device_register(&vm_cmdline_parent);
		if (err) {
			pr_err("Failed to register parent device!\n");
			return err;
		}
		vm_cmdline_parent_registered = 1;
	}

	pr_info("Registering device virtio-mmio.%d at 0x%llx-0x%llx, IRQ %d.\n",
		       vm_cmdline_id,
		       (unsigned long long)resources[0].start,
		       (unsigned long long)resources[0].end,
		       (int)resources[1].start);

	pdev = platform_device_register_resndata(&vm_cmdline_parent,
			"virtio-mmio", vm_cmdline_id++,
			resources, ARRAY_SIZE(resources), NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return 0;
}

static int vm_cmdline_get_device(struct device *dev, void *data)
{
	char *buffer = data;
	unsigned int len = strlen(buffer);
	struct platform_device *pdev = to_platform_device(dev);

	snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n",
			pdev->resource[0].end - pdev->resource[0].start + 1ULL,
			(unsigned long long)pdev->resource[0].start,
			(unsigned long long)pdev->resource[1].start,
			pdev->id);
	return 0;
}

static int vm_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	device_for_each_child(&vm_cmdline_parent, buffer,
			vm_cmdline_get_device);
	return strlen(buffer) + 1;
}

static struct kernel_param_ops vm_cmdline_param_ops = {
	.set = vm_cmdline_set,
	.get = vm_cmdline_get,
};

device_param_cb(device, &vm_cmdline_param_ops, NULL, S_IRUSR);

static int vm_unregister_cmdline_device(struct device *dev,
		void *data)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}

static void vm_unregister_cmdline_devices(void)
{
	if (vm_cmdline_parent_registered) {
		device_for_each_child(&vm_cmdline_parent, NULL,
				vm_unregister_cmdline_device);
		device_unregister(&vm_cmdline_parent);
		vm_cmdline_parent_registered = 0;
	}
}

#else

static void vm_unregister_cmdline_devices(void)
{
}

#endif

/* Platform driver */

static struct of_device_id virtio_mmio_match[] = {
	{ .compatible = "virtio,mmio", },
	{},
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);

static struct platform_driver virtio_mmio_driver = {
	.probe		= virtio_mmio_probe,
	.remove		= virtio_mmio_remove,
	.driver		= {
		.name	= "virtio-mmio",
		.of_match_table	= virtio_mmio_match,
	},
};

static int __init virtio_mmio_init(void)
{
	return platform_driver_register(&virtio_mmio_driver);
}

static void __exit virtio_mmio_exit(void)
{
	platform_driver_unregister(&virtio_mmio_driver);
	vm_unregister_cmdline_devices();
}

module_init(virtio_mmio_init);
module_exit(virtio_mmio_exit);

MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
MODULE_LICENSE("GPL");