/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
15 #include <linux/device.h>
16 #include <linux/acpi.h>
17 #include <linux/iommu.h>
18 #include <linux/module.h>
19 #include <linux/mutex.h>
20 #include <linux/slab.h>
21 #include <linux/types.h>
22 #include <linux/uaccess.h>
23 #include <linux/vfio.h>
25 #include "vfio_platform_private.h"
#define DRIVER_VERSION  "0.10"
#define DRIVER_AUTHOR   "Antonios Motakis <a.motakis@virtualopensystems.com>"
#define DRIVER_DESC     "VFIO platform base module"

/* A device was ACPI-probed iff vfio_platform_acpi_probe() recorded a HID. */
#define VFIO_PLATFORM_IS_ACPI(vdev) ((vdev)->acpihid != NULL)

/* Registered reset handlers; driver_lock also serializes open/release. */
static LIST_HEAD(reset_list);
static DEFINE_MUTEX(driver_lock);
36 static vfio_platform_reset_fn_t
vfio_platform_lookup_reset(const char *compat
,
37 struct module
**module
)
39 struct vfio_platform_reset_node
*iter
;
40 vfio_platform_reset_fn_t reset_fn
= NULL
;
42 mutex_lock(&driver_lock
);
43 list_for_each_entry(iter
, &reset_list
, link
) {
44 if (!strcmp(iter
->compat
, compat
) &&
45 try_module_get(iter
->owner
)) {
46 *module
= iter
->owner
;
47 reset_fn
= iter
->of_reset
;
51 mutex_unlock(&driver_lock
);
55 static int vfio_platform_acpi_probe(struct vfio_platform_device
*vdev
,
58 struct acpi_device
*adev
;
63 adev
= ACPI_COMPANION(dev
);
65 pr_err("VFIO: ACPI companion device not found for %s\n",
71 vdev
->acpihid
= acpi_device_hid(adev
);
73 return WARN_ON(!vdev
->acpihid
) ? -EINVAL
: 0;
76 static int vfio_platform_acpi_call_reset(struct vfio_platform_device
*vdev
,
77 const char **extra_dbg
)
80 struct acpi_buffer buffer
= { ACPI_ALLOCATE_BUFFER
, NULL
};
81 struct device
*dev
= vdev
->device
;
82 acpi_handle handle
= ACPI_HANDLE(dev
);
85 acpi_ret
= acpi_evaluate_object(handle
, "_RST", NULL
, &buffer
);
86 if (ACPI_FAILURE(acpi_ret
)) {
88 *extra_dbg
= acpi_format_exception(acpi_ret
);
98 static bool vfio_platform_acpi_has_reset(struct vfio_platform_device
*vdev
)
101 struct device
*dev
= vdev
->device
;
102 acpi_handle handle
= ACPI_HANDLE(dev
);
104 return acpi_has_method(handle
, "_RST");
110 static bool vfio_platform_has_reset(struct vfio_platform_device
*vdev
)
112 if (VFIO_PLATFORM_IS_ACPI(vdev
))
113 return vfio_platform_acpi_has_reset(vdev
);
115 return vdev
->of_reset
? true : false;
118 static int vfio_platform_get_reset(struct vfio_platform_device
*vdev
)
120 if (VFIO_PLATFORM_IS_ACPI(vdev
))
121 return vfio_platform_acpi_has_reset(vdev
) ? 0 : -ENOENT
;
123 vdev
->of_reset
= vfio_platform_lookup_reset(vdev
->compat
,
124 &vdev
->reset_module
);
125 if (!vdev
->of_reset
) {
126 request_module("vfio-reset:%s", vdev
->compat
);
127 vdev
->of_reset
= vfio_platform_lookup_reset(vdev
->compat
,
128 &vdev
->reset_module
);
131 return vdev
->of_reset
? 0 : -ENOENT
;
134 static void vfio_platform_put_reset(struct vfio_platform_device
*vdev
)
136 if (VFIO_PLATFORM_IS_ACPI(vdev
))
140 module_put(vdev
->reset_module
);
143 static int vfio_platform_regions_init(struct vfio_platform_device
*vdev
)
147 while (vdev
->get_resource(vdev
, cnt
))
150 vdev
->regions
= kcalloc(cnt
, sizeof(struct vfio_platform_region
),
155 for (i
= 0; i
< cnt
; i
++) {
156 struct resource
*res
=
157 vdev
->get_resource(vdev
, i
);
162 vdev
->regions
[i
].addr
= res
->start
;
163 vdev
->regions
[i
].size
= resource_size(res
);
164 vdev
->regions
[i
].flags
= 0;
166 switch (resource_type(res
)) {
168 vdev
->regions
[i
].type
= VFIO_PLATFORM_REGION_TYPE_MMIO
;
169 vdev
->regions
[i
].flags
|= VFIO_REGION_INFO_FLAG_READ
;
170 if (!(res
->flags
& IORESOURCE_READONLY
))
171 vdev
->regions
[i
].flags
|=
172 VFIO_REGION_INFO_FLAG_WRITE
;
175 * Only regions addressed with PAGE granularity may be
178 if (!(vdev
->regions
[i
].addr
& ~PAGE_MASK
) &&
179 !(vdev
->regions
[i
].size
& ~PAGE_MASK
))
180 vdev
->regions
[i
].flags
|=
181 VFIO_REGION_INFO_FLAG_MMAP
;
185 vdev
->regions
[i
].type
= VFIO_PLATFORM_REGION_TYPE_PIO
;
192 vdev
->num_regions
= cnt
;
196 kfree(vdev
->regions
);
200 static void vfio_platform_regions_cleanup(struct vfio_platform_device
*vdev
)
204 for (i
= 0; i
< vdev
->num_regions
; i
++)
205 iounmap(vdev
->regions
[i
].ioaddr
);
207 vdev
->num_regions
= 0;
208 kfree(vdev
->regions
);
211 static int vfio_platform_call_reset(struct vfio_platform_device
*vdev
,
212 const char **extra_dbg
)
214 if (VFIO_PLATFORM_IS_ACPI(vdev
)) {
215 dev_info(vdev
->device
, "reset\n");
216 return vfio_platform_acpi_call_reset(vdev
, extra_dbg
);
217 } else if (vdev
->of_reset
) {
218 dev_info(vdev
->device
, "reset\n");
219 return vdev
->of_reset(vdev
);
222 dev_warn(vdev
->device
, "no reset function found!\n");
226 static void vfio_platform_release(void *device_data
)
228 struct vfio_platform_device
*vdev
= device_data
;
230 mutex_lock(&driver_lock
);
232 if (!(--vdev
->refcnt
)) {
233 const char *extra_dbg
= NULL
;
236 ret
= vfio_platform_call_reset(vdev
, &extra_dbg
);
237 if (ret
&& vdev
->reset_required
) {
238 dev_warn(vdev
->device
, "reset driver is required and reset call failed in release (%d) %s\n",
239 ret
, extra_dbg
? extra_dbg
: "");
242 vfio_platform_regions_cleanup(vdev
);
243 vfio_platform_irq_cleanup(vdev
);
246 mutex_unlock(&driver_lock
);
248 module_put(vdev
->parent_module
);
251 static int vfio_platform_open(void *device_data
)
253 struct vfio_platform_device
*vdev
= device_data
;
256 if (!try_module_get(vdev
->parent_module
))
259 mutex_lock(&driver_lock
);
262 const char *extra_dbg
= NULL
;
264 ret
= vfio_platform_regions_init(vdev
);
268 ret
= vfio_platform_irq_init(vdev
);
272 ret
= vfio_platform_call_reset(vdev
, &extra_dbg
);
273 if (ret
&& vdev
->reset_required
) {
274 dev_warn(vdev
->device
, "reset driver is required and reset call failed in open (%d) %s\n",
275 ret
, extra_dbg
? extra_dbg
: "");
282 mutex_unlock(&driver_lock
);
286 vfio_platform_irq_cleanup(vdev
);
288 vfio_platform_regions_cleanup(vdev
);
290 mutex_unlock(&driver_lock
);
291 module_put(THIS_MODULE
);
295 static long vfio_platform_ioctl(void *device_data
,
296 unsigned int cmd
, unsigned long arg
)
298 struct vfio_platform_device
*vdev
= device_data
;
301 if (cmd
== VFIO_DEVICE_GET_INFO
) {
302 struct vfio_device_info info
;
304 minsz
= offsetofend(struct vfio_device_info
, num_irqs
);
306 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
309 if (info
.argsz
< minsz
)
312 if (vfio_platform_has_reset(vdev
))
313 vdev
->flags
|= VFIO_DEVICE_FLAGS_RESET
;
314 info
.flags
= vdev
->flags
;
315 info
.num_regions
= vdev
->num_regions
;
316 info
.num_irqs
= vdev
->num_irqs
;
318 return copy_to_user((void __user
*)arg
, &info
, minsz
) ?
321 } else if (cmd
== VFIO_DEVICE_GET_REGION_INFO
) {
322 struct vfio_region_info info
;
324 minsz
= offsetofend(struct vfio_region_info
, offset
);
326 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
329 if (info
.argsz
< minsz
)
332 if (info
.index
>= vdev
->num_regions
)
335 /* map offset to the physical address */
336 info
.offset
= VFIO_PLATFORM_INDEX_TO_OFFSET(info
.index
);
337 info
.size
= vdev
->regions
[info
.index
].size
;
338 info
.flags
= vdev
->regions
[info
.index
].flags
;
340 return copy_to_user((void __user
*)arg
, &info
, minsz
) ?
343 } else if (cmd
== VFIO_DEVICE_GET_IRQ_INFO
) {
344 struct vfio_irq_info info
;
346 minsz
= offsetofend(struct vfio_irq_info
, count
);
348 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
351 if (info
.argsz
< minsz
)
354 if (info
.index
>= vdev
->num_irqs
)
357 info
.flags
= vdev
->irqs
[info
.index
].flags
;
358 info
.count
= vdev
->irqs
[info
.index
].count
;
360 return copy_to_user((void __user
*)arg
, &info
, minsz
) ?
363 } else if (cmd
== VFIO_DEVICE_SET_IRQS
) {
364 struct vfio_irq_set hdr
;
368 minsz
= offsetofend(struct vfio_irq_set
, count
);
370 if (copy_from_user(&hdr
, (void __user
*)arg
, minsz
))
373 if (hdr
.argsz
< minsz
)
376 if (hdr
.index
>= vdev
->num_irqs
)
379 if (hdr
.flags
& ~(VFIO_IRQ_SET_DATA_TYPE_MASK
|
380 VFIO_IRQ_SET_ACTION_TYPE_MASK
))
383 if (!(hdr
.flags
& VFIO_IRQ_SET_DATA_NONE
)) {
386 if (hdr
.flags
& VFIO_IRQ_SET_DATA_BOOL
)
387 size
= sizeof(uint8_t);
388 else if (hdr
.flags
& VFIO_IRQ_SET_DATA_EVENTFD
)
389 size
= sizeof(int32_t);
393 if (hdr
.argsz
- minsz
< size
)
396 data
= memdup_user((void __user
*)(arg
+ minsz
), size
);
398 return PTR_ERR(data
);
401 mutex_lock(&vdev
->igate
);
403 ret
= vfio_platform_set_irqs_ioctl(vdev
, hdr
.flags
, hdr
.index
,
404 hdr
.start
, hdr
.count
, data
);
405 mutex_unlock(&vdev
->igate
);
410 } else if (cmd
== VFIO_DEVICE_RESET
) {
411 return vfio_platform_call_reset(vdev
, NULL
);
417 static ssize_t
vfio_platform_read_mmio(struct vfio_platform_region
*reg
,
418 char __user
*buf
, size_t count
,
421 unsigned int done
= 0;
425 ioremap_nocache(reg
->addr
, reg
->size
);
434 if (count
>= 4 && !(off
% 4)) {
437 val
= ioread32(reg
->ioaddr
+ off
);
438 if (copy_to_user(buf
, &val
, 4))
442 } else if (count
>= 2 && !(off
% 2)) {
445 val
= ioread16(reg
->ioaddr
+ off
);
446 if (copy_to_user(buf
, &val
, 2))
453 val
= ioread8(reg
->ioaddr
+ off
);
454 if (copy_to_user(buf
, &val
, 1))
472 static ssize_t
vfio_platform_read(void *device_data
, char __user
*buf
,
473 size_t count
, loff_t
*ppos
)
475 struct vfio_platform_device
*vdev
= device_data
;
476 unsigned int index
= VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos
);
477 loff_t off
= *ppos
& VFIO_PLATFORM_OFFSET_MASK
;
479 if (index
>= vdev
->num_regions
)
482 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_READ
))
485 if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_MMIO
)
486 return vfio_platform_read_mmio(&vdev
->regions
[index
],
488 else if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_PIO
)
489 return -EINVAL
; /* not implemented */
494 static ssize_t
vfio_platform_write_mmio(struct vfio_platform_region
*reg
,
495 const char __user
*buf
, size_t count
,
498 unsigned int done
= 0;
502 ioremap_nocache(reg
->addr
, reg
->size
);
511 if (count
>= 4 && !(off
% 4)) {
514 if (copy_from_user(&val
, buf
, 4))
516 iowrite32(val
, reg
->ioaddr
+ off
);
519 } else if (count
>= 2 && !(off
% 2)) {
522 if (copy_from_user(&val
, buf
, 2))
524 iowrite16(val
, reg
->ioaddr
+ off
);
530 if (copy_from_user(&val
, buf
, 1))
532 iowrite8(val
, reg
->ioaddr
+ off
);
548 static ssize_t
vfio_platform_write(void *device_data
, const char __user
*buf
,
549 size_t count
, loff_t
*ppos
)
551 struct vfio_platform_device
*vdev
= device_data
;
552 unsigned int index
= VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos
);
553 loff_t off
= *ppos
& VFIO_PLATFORM_OFFSET_MASK
;
555 if (index
>= vdev
->num_regions
)
558 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_WRITE
))
561 if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_MMIO
)
562 return vfio_platform_write_mmio(&vdev
->regions
[index
],
564 else if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_PIO
)
565 return -EINVAL
; /* not implemented */
570 static int vfio_platform_mmap_mmio(struct vfio_platform_region region
,
571 struct vm_area_struct
*vma
)
573 u64 req_len
, pgoff
, req_start
;
575 req_len
= vma
->vm_end
- vma
->vm_start
;
576 pgoff
= vma
->vm_pgoff
&
577 ((1U << (VFIO_PLATFORM_OFFSET_SHIFT
- PAGE_SHIFT
)) - 1);
578 req_start
= pgoff
<< PAGE_SHIFT
;
580 if (region
.size
< PAGE_SIZE
|| req_start
+ req_len
> region
.size
)
583 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
584 vma
->vm_pgoff
= (region
.addr
>> PAGE_SHIFT
) + pgoff
;
586 return remap_pfn_range(vma
, vma
->vm_start
, vma
->vm_pgoff
,
587 req_len
, vma
->vm_page_prot
);
590 static int vfio_platform_mmap(void *device_data
, struct vm_area_struct
*vma
)
592 struct vfio_platform_device
*vdev
= device_data
;
595 index
= vma
->vm_pgoff
>> (VFIO_PLATFORM_OFFSET_SHIFT
- PAGE_SHIFT
);
597 if (vma
->vm_end
< vma
->vm_start
)
599 if (!(vma
->vm_flags
& VM_SHARED
))
601 if (index
>= vdev
->num_regions
)
603 if (vma
->vm_start
& ~PAGE_MASK
)
605 if (vma
->vm_end
& ~PAGE_MASK
)
608 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_MMAP
))
611 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_READ
)
612 && (vma
->vm_flags
& VM_READ
))
615 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_WRITE
)
616 && (vma
->vm_flags
& VM_WRITE
))
619 vma
->vm_private_data
= vdev
;
621 if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_MMIO
)
622 return vfio_platform_mmap_mmio(vdev
->regions
[index
], vma
);
624 else if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_PIO
)
625 return -EINVAL
; /* not implemented */
630 static const struct vfio_device_ops vfio_platform_ops
= {
631 .name
= "vfio-platform",
632 .open
= vfio_platform_open
,
633 .release
= vfio_platform_release
,
634 .ioctl
= vfio_platform_ioctl
,
635 .read
= vfio_platform_read
,
636 .write
= vfio_platform_write
,
637 .mmap
= vfio_platform_mmap
,
640 static int vfio_platform_of_probe(struct vfio_platform_device
*vdev
,
645 ret
= device_property_read_string(dev
, "compatible",
648 pr_err("VFIO: cannot retrieve compat for %s\n",
/*
 * There can be two kernel build combinations. One build where
 * CONFIG_ACPI is not selected in Kconfig and another one with the ACPI Kconfig.
 *
 * In the first case, vfio_platform_acpi_probe will return since
 * acpi_disabled is 1. The DT user will not see any kind of messages from
 * ACPI.
 *
 * In the second case, both DT and ACPI are compiled in but the system is
 * booting with any of these combinations.
 *
 * If the firmware is DT type, then acpi_disabled is 1. The ACPI probe routine
 * terminates immediately without any messages.
 *
 * If the firmware is ACPI type, then acpi_disabled is 0. All other checks are
 * valid checks. We cannot claim that this system is DT.
 */
671 int vfio_platform_probe_common(struct vfio_platform_device
*vdev
,
674 struct iommu_group
*group
;
680 ret
= vfio_platform_acpi_probe(vdev
, dev
);
682 ret
= vfio_platform_of_probe(vdev
, dev
);
689 ret
= vfio_platform_get_reset(vdev
);
690 if (ret
&& vdev
->reset_required
) {
691 pr_err("vfio: no reset function found for device %s\n",
696 group
= vfio_iommu_group_get(dev
);
698 pr_err("VFIO: No IOMMU group for device %s\n", vdev
->name
);
702 ret
= vfio_add_group_dev(dev
, &vfio_platform_ops
, vdev
);
704 vfio_iommu_group_put(group
, dev
);
708 mutex_init(&vdev
->igate
);
712 EXPORT_SYMBOL_GPL(vfio_platform_probe_common
);
714 struct vfio_platform_device
*vfio_platform_remove_common(struct device
*dev
)
716 struct vfio_platform_device
*vdev
;
718 vdev
= vfio_del_group_dev(dev
);
721 vfio_platform_put_reset(vdev
);
722 vfio_iommu_group_put(dev
->iommu_group
, dev
);
727 EXPORT_SYMBOL_GPL(vfio_platform_remove_common
);
729 void __vfio_platform_register_reset(struct vfio_platform_reset_node
*node
)
731 mutex_lock(&driver_lock
);
732 list_add(&node
->link
, &reset_list
);
733 mutex_unlock(&driver_lock
);
735 EXPORT_SYMBOL_GPL(__vfio_platform_register_reset
);
737 void vfio_platform_unregister_reset(const char *compat
,
738 vfio_platform_reset_fn_t fn
)
740 struct vfio_platform_reset_node
*iter
, *temp
;
742 mutex_lock(&driver_lock
);
743 list_for_each_entry_safe(iter
, temp
, &reset_list
, link
) {
744 if (!strcmp(iter
->compat
, compat
) && (iter
->of_reset
== fn
)) {
745 list_del(&iter
->link
);
750 mutex_unlock(&driver_lock
);
753 EXPORT_SYMBOL_GPL(vfio_platform_unregister_reset
);
/* Module metadata exposed via modinfo. */
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);