/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
15 #include <linux/device.h>
16 #include <linux/acpi.h>
17 #include <linux/iommu.h>
18 #include <linux/module.h>
19 #include <linux/mutex.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/slab.h>
22 #include <linux/types.h>
23 #include <linux/uaccess.h>
24 #include <linux/vfio.h>
26 #include "vfio_platform_private.h"
#define DRIVER_VERSION  "0.10"
#define DRIVER_AUTHOR   "Antonios Motakis <a.motakis@virtualopensystems.com>"
#define DRIVER_DESC     "VFIO platform base module"

/* A device probed via ACPI has a non-NULL ACPI HID; DT devices do not. */
#define VFIO_PLATFORM_IS_ACPI(vdev) ((vdev)->acpihid != NULL)
/* Registry of vendor-provided reset functions, keyed by compat string. */
static LIST_HEAD(reset_list);
/* Protects reset_list and serializes open/release refcounting below. */
static DEFINE_MUTEX(driver_lock);
37 static vfio_platform_reset_fn_t
vfio_platform_lookup_reset(const char *compat
,
38 struct module
**module
)
40 struct vfio_platform_reset_node
*iter
;
41 vfio_platform_reset_fn_t reset_fn
= NULL
;
43 mutex_lock(&driver_lock
);
44 list_for_each_entry(iter
, &reset_list
, link
) {
45 if (!strcmp(iter
->compat
, compat
) &&
46 try_module_get(iter
->owner
)) {
47 *module
= iter
->owner
;
48 reset_fn
= iter
->of_reset
;
52 mutex_unlock(&driver_lock
);
56 static int vfio_platform_acpi_probe(struct vfio_platform_device
*vdev
,
59 struct acpi_device
*adev
;
64 adev
= ACPI_COMPANION(dev
);
66 pr_err("VFIO: ACPI companion device not found for %s\n",
72 vdev
->acpihid
= acpi_device_hid(adev
);
74 return WARN_ON(!vdev
->acpihid
) ? -EINVAL
: 0;
77 static int vfio_platform_acpi_call_reset(struct vfio_platform_device
*vdev
,
78 const char **extra_dbg
)
81 struct acpi_buffer buffer
= { ACPI_ALLOCATE_BUFFER
, NULL
};
82 struct device
*dev
= vdev
->device
;
83 acpi_handle handle
= ACPI_HANDLE(dev
);
86 acpi_ret
= acpi_evaluate_object(handle
, "_RST", NULL
, &buffer
);
87 if (ACPI_FAILURE(acpi_ret
)) {
89 *extra_dbg
= acpi_format_exception(acpi_ret
);
99 static bool vfio_platform_acpi_has_reset(struct vfio_platform_device
*vdev
)
102 struct device
*dev
= vdev
->device
;
103 acpi_handle handle
= ACPI_HANDLE(dev
);
105 return acpi_has_method(handle
, "_RST");
111 static bool vfio_platform_has_reset(struct vfio_platform_device
*vdev
)
113 if (VFIO_PLATFORM_IS_ACPI(vdev
))
114 return vfio_platform_acpi_has_reset(vdev
);
116 return vdev
->of_reset
? true : false;
119 static int vfio_platform_get_reset(struct vfio_platform_device
*vdev
)
121 if (VFIO_PLATFORM_IS_ACPI(vdev
))
122 return vfio_platform_acpi_has_reset(vdev
) ? 0 : -ENOENT
;
124 vdev
->of_reset
= vfio_platform_lookup_reset(vdev
->compat
,
125 &vdev
->reset_module
);
126 if (!vdev
->of_reset
) {
127 request_module("vfio-reset:%s", vdev
->compat
);
128 vdev
->of_reset
= vfio_platform_lookup_reset(vdev
->compat
,
129 &vdev
->reset_module
);
132 return vdev
->of_reset
? 0 : -ENOENT
;
135 static void vfio_platform_put_reset(struct vfio_platform_device
*vdev
)
137 if (VFIO_PLATFORM_IS_ACPI(vdev
))
141 module_put(vdev
->reset_module
);
144 static int vfio_platform_regions_init(struct vfio_platform_device
*vdev
)
148 while (vdev
->get_resource(vdev
, cnt
))
151 vdev
->regions
= kcalloc(cnt
, sizeof(struct vfio_platform_region
),
156 for (i
= 0; i
< cnt
; i
++) {
157 struct resource
*res
=
158 vdev
->get_resource(vdev
, i
);
163 vdev
->regions
[i
].addr
= res
->start
;
164 vdev
->regions
[i
].size
= resource_size(res
);
165 vdev
->regions
[i
].flags
= 0;
167 switch (resource_type(res
)) {
169 vdev
->regions
[i
].type
= VFIO_PLATFORM_REGION_TYPE_MMIO
;
170 vdev
->regions
[i
].flags
|= VFIO_REGION_INFO_FLAG_READ
;
171 if (!(res
->flags
& IORESOURCE_READONLY
))
172 vdev
->regions
[i
].flags
|=
173 VFIO_REGION_INFO_FLAG_WRITE
;
176 * Only regions addressed with PAGE granularity may be
179 if (!(vdev
->regions
[i
].addr
& ~PAGE_MASK
) &&
180 !(vdev
->regions
[i
].size
& ~PAGE_MASK
))
181 vdev
->regions
[i
].flags
|=
182 VFIO_REGION_INFO_FLAG_MMAP
;
186 vdev
->regions
[i
].type
= VFIO_PLATFORM_REGION_TYPE_PIO
;
193 vdev
->num_regions
= cnt
;
197 kfree(vdev
->regions
);
201 static void vfio_platform_regions_cleanup(struct vfio_platform_device
*vdev
)
205 for (i
= 0; i
< vdev
->num_regions
; i
++)
206 iounmap(vdev
->regions
[i
].ioaddr
);
208 vdev
->num_regions
= 0;
209 kfree(vdev
->regions
);
212 static int vfio_platform_call_reset(struct vfio_platform_device
*vdev
,
213 const char **extra_dbg
)
215 if (VFIO_PLATFORM_IS_ACPI(vdev
)) {
216 dev_info(vdev
->device
, "reset\n");
217 return vfio_platform_acpi_call_reset(vdev
, extra_dbg
);
218 } else if (vdev
->of_reset
) {
219 dev_info(vdev
->device
, "reset\n");
220 return vdev
->of_reset(vdev
);
223 dev_warn(vdev
->device
, "no reset function found!\n");
227 static void vfio_platform_release(void *device_data
)
229 struct vfio_platform_device
*vdev
= device_data
;
231 mutex_lock(&driver_lock
);
233 if (!(--vdev
->refcnt
)) {
234 const char *extra_dbg
= NULL
;
237 ret
= vfio_platform_call_reset(vdev
, &extra_dbg
);
238 if (ret
&& vdev
->reset_required
) {
239 dev_warn(vdev
->device
, "reset driver is required and reset call failed in release (%d) %s\n",
240 ret
, extra_dbg
? extra_dbg
: "");
243 pm_runtime_put(vdev
->device
);
244 vfio_platform_regions_cleanup(vdev
);
245 vfio_platform_irq_cleanup(vdev
);
248 mutex_unlock(&driver_lock
);
250 module_put(vdev
->parent_module
);
253 static int vfio_platform_open(void *device_data
)
255 struct vfio_platform_device
*vdev
= device_data
;
258 if (!try_module_get(vdev
->parent_module
))
261 mutex_lock(&driver_lock
);
264 const char *extra_dbg
= NULL
;
266 ret
= vfio_platform_regions_init(vdev
);
270 ret
= vfio_platform_irq_init(vdev
);
274 ret
= pm_runtime_get_sync(vdev
->device
);
278 ret
= vfio_platform_call_reset(vdev
, &extra_dbg
);
279 if (ret
&& vdev
->reset_required
) {
280 dev_warn(vdev
->device
, "reset driver is required and reset call failed in open (%d) %s\n",
281 ret
, extra_dbg
? extra_dbg
: "");
288 mutex_unlock(&driver_lock
);
292 pm_runtime_put(vdev
->device
);
294 vfio_platform_irq_cleanup(vdev
);
296 vfio_platform_regions_cleanup(vdev
);
298 mutex_unlock(&driver_lock
);
299 module_put(THIS_MODULE
);
303 static long vfio_platform_ioctl(void *device_data
,
304 unsigned int cmd
, unsigned long arg
)
306 struct vfio_platform_device
*vdev
= device_data
;
309 if (cmd
== VFIO_DEVICE_GET_INFO
) {
310 struct vfio_device_info info
;
312 minsz
= offsetofend(struct vfio_device_info
, num_irqs
);
314 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
317 if (info
.argsz
< minsz
)
320 if (vfio_platform_has_reset(vdev
))
321 vdev
->flags
|= VFIO_DEVICE_FLAGS_RESET
;
322 info
.flags
= vdev
->flags
;
323 info
.num_regions
= vdev
->num_regions
;
324 info
.num_irqs
= vdev
->num_irqs
;
326 return copy_to_user((void __user
*)arg
, &info
, minsz
) ?
329 } else if (cmd
== VFIO_DEVICE_GET_REGION_INFO
) {
330 struct vfio_region_info info
;
332 minsz
= offsetofend(struct vfio_region_info
, offset
);
334 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
337 if (info
.argsz
< minsz
)
340 if (info
.index
>= vdev
->num_regions
)
343 /* map offset to the physical address */
344 info
.offset
= VFIO_PLATFORM_INDEX_TO_OFFSET(info
.index
);
345 info
.size
= vdev
->regions
[info
.index
].size
;
346 info
.flags
= vdev
->regions
[info
.index
].flags
;
348 return copy_to_user((void __user
*)arg
, &info
, minsz
) ?
351 } else if (cmd
== VFIO_DEVICE_GET_IRQ_INFO
) {
352 struct vfio_irq_info info
;
354 minsz
= offsetofend(struct vfio_irq_info
, count
);
356 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
359 if (info
.argsz
< minsz
)
362 if (info
.index
>= vdev
->num_irqs
)
365 info
.flags
= vdev
->irqs
[info
.index
].flags
;
366 info
.count
= vdev
->irqs
[info
.index
].count
;
368 return copy_to_user((void __user
*)arg
, &info
, minsz
) ?
371 } else if (cmd
== VFIO_DEVICE_SET_IRQS
) {
372 struct vfio_irq_set hdr
;
375 size_t data_size
= 0;
377 minsz
= offsetofend(struct vfio_irq_set
, count
);
379 if (copy_from_user(&hdr
, (void __user
*)arg
, minsz
))
382 ret
= vfio_set_irqs_validate_and_prepare(&hdr
, vdev
->num_irqs
,
383 vdev
->num_irqs
, &data_size
);
388 data
= memdup_user((void __user
*)(arg
+ minsz
),
391 return PTR_ERR(data
);
394 mutex_lock(&vdev
->igate
);
396 ret
= vfio_platform_set_irqs_ioctl(vdev
, hdr
.flags
, hdr
.index
,
397 hdr
.start
, hdr
.count
, data
);
398 mutex_unlock(&vdev
->igate
);
403 } else if (cmd
== VFIO_DEVICE_RESET
) {
404 return vfio_platform_call_reset(vdev
, NULL
);
410 static ssize_t
vfio_platform_read_mmio(struct vfio_platform_region
*reg
,
411 char __user
*buf
, size_t count
,
414 unsigned int done
= 0;
418 ioremap_nocache(reg
->addr
, reg
->size
);
427 if (count
>= 4 && !(off
% 4)) {
430 val
= ioread32(reg
->ioaddr
+ off
);
431 if (copy_to_user(buf
, &val
, 4))
435 } else if (count
>= 2 && !(off
% 2)) {
438 val
= ioread16(reg
->ioaddr
+ off
);
439 if (copy_to_user(buf
, &val
, 2))
446 val
= ioread8(reg
->ioaddr
+ off
);
447 if (copy_to_user(buf
, &val
, 1))
465 static ssize_t
vfio_platform_read(void *device_data
, char __user
*buf
,
466 size_t count
, loff_t
*ppos
)
468 struct vfio_platform_device
*vdev
= device_data
;
469 unsigned int index
= VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos
);
470 loff_t off
= *ppos
& VFIO_PLATFORM_OFFSET_MASK
;
472 if (index
>= vdev
->num_regions
)
475 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_READ
))
478 if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_MMIO
)
479 return vfio_platform_read_mmio(&vdev
->regions
[index
],
481 else if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_PIO
)
482 return -EINVAL
; /* not implemented */
487 static ssize_t
vfio_platform_write_mmio(struct vfio_platform_region
*reg
,
488 const char __user
*buf
, size_t count
,
491 unsigned int done
= 0;
495 ioremap_nocache(reg
->addr
, reg
->size
);
504 if (count
>= 4 && !(off
% 4)) {
507 if (copy_from_user(&val
, buf
, 4))
509 iowrite32(val
, reg
->ioaddr
+ off
);
512 } else if (count
>= 2 && !(off
% 2)) {
515 if (copy_from_user(&val
, buf
, 2))
517 iowrite16(val
, reg
->ioaddr
+ off
);
523 if (copy_from_user(&val
, buf
, 1))
525 iowrite8(val
, reg
->ioaddr
+ off
);
541 static ssize_t
vfio_platform_write(void *device_data
, const char __user
*buf
,
542 size_t count
, loff_t
*ppos
)
544 struct vfio_platform_device
*vdev
= device_data
;
545 unsigned int index
= VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos
);
546 loff_t off
= *ppos
& VFIO_PLATFORM_OFFSET_MASK
;
548 if (index
>= vdev
->num_regions
)
551 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_WRITE
))
554 if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_MMIO
)
555 return vfio_platform_write_mmio(&vdev
->regions
[index
],
557 else if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_PIO
)
558 return -EINVAL
; /* not implemented */
563 static int vfio_platform_mmap_mmio(struct vfio_platform_region region
,
564 struct vm_area_struct
*vma
)
566 u64 req_len
, pgoff
, req_start
;
568 req_len
= vma
->vm_end
- vma
->vm_start
;
569 pgoff
= vma
->vm_pgoff
&
570 ((1U << (VFIO_PLATFORM_OFFSET_SHIFT
- PAGE_SHIFT
)) - 1);
571 req_start
= pgoff
<< PAGE_SHIFT
;
573 if (region
.size
< PAGE_SIZE
|| req_start
+ req_len
> region
.size
)
576 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
577 vma
->vm_pgoff
= (region
.addr
>> PAGE_SHIFT
) + pgoff
;
579 return remap_pfn_range(vma
, vma
->vm_start
, vma
->vm_pgoff
,
580 req_len
, vma
->vm_page_prot
);
583 static int vfio_platform_mmap(void *device_data
, struct vm_area_struct
*vma
)
585 struct vfio_platform_device
*vdev
= device_data
;
588 index
= vma
->vm_pgoff
>> (VFIO_PLATFORM_OFFSET_SHIFT
- PAGE_SHIFT
);
590 if (vma
->vm_end
< vma
->vm_start
)
592 if (!(vma
->vm_flags
& VM_SHARED
))
594 if (index
>= vdev
->num_regions
)
596 if (vma
->vm_start
& ~PAGE_MASK
)
598 if (vma
->vm_end
& ~PAGE_MASK
)
601 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_MMAP
))
604 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_READ
)
605 && (vma
->vm_flags
& VM_READ
))
608 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_WRITE
)
609 && (vma
->vm_flags
& VM_WRITE
))
612 vma
->vm_private_data
= vdev
;
614 if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_MMIO
)
615 return vfio_platform_mmap_mmio(vdev
->regions
[index
], vma
);
617 else if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_PIO
)
618 return -EINVAL
; /* not implemented */
623 static const struct vfio_device_ops vfio_platform_ops
= {
624 .name
= "vfio-platform",
625 .open
= vfio_platform_open
,
626 .release
= vfio_platform_release
,
627 .ioctl
= vfio_platform_ioctl
,
628 .read
= vfio_platform_read
,
629 .write
= vfio_platform_write
,
630 .mmap
= vfio_platform_mmap
,
633 static int vfio_platform_of_probe(struct vfio_platform_device
*vdev
,
638 ret
= device_property_read_string(dev
, "compatible",
641 pr_err("VFIO: Cannot retrieve compat for %s\n", vdev
->name
);
/*
 * There can be two kernel build combinations. One build where
 * ACPI is not selected in Kconfig and another one with the ACPI Kconfig.
 *
 * In the first case, vfio_platform_acpi_probe will return since
 * acpi_disabled is 1. DT user will not see any kind of messages from
 * ACPI.
 *
 * In the second case, both DT and ACPI is compiled in but the system is
 * booting with any of these combinations.
 *
 * If the firmware is DT type, then acpi_disabled is 1. The ACPI probe
 * routine terminates immediately without any messages.
 *
 * If the firmware is ACPI type, then acpi_disabled is 0. All other checks
 * are valid checks. We cannot claim that this system is DT.
 */
663 int vfio_platform_probe_common(struct vfio_platform_device
*vdev
,
666 struct iommu_group
*group
;
672 ret
= vfio_platform_acpi_probe(vdev
, dev
);
674 ret
= vfio_platform_of_probe(vdev
, dev
);
681 ret
= vfio_platform_get_reset(vdev
);
682 if (ret
&& vdev
->reset_required
) {
683 pr_err("VFIO: No reset function found for device %s\n",
688 group
= vfio_iommu_group_get(dev
);
690 pr_err("VFIO: No IOMMU group for device %s\n", vdev
->name
);
695 ret
= vfio_add_group_dev(dev
, &vfio_platform_ops
, vdev
);
699 mutex_init(&vdev
->igate
);
701 pm_runtime_enable(vdev
->device
);
705 vfio_iommu_group_put(group
, dev
);
707 vfio_platform_put_reset(vdev
);
710 EXPORT_SYMBOL_GPL(vfio_platform_probe_common
);
712 struct vfio_platform_device
*vfio_platform_remove_common(struct device
*dev
)
714 struct vfio_platform_device
*vdev
;
716 vdev
= vfio_del_group_dev(dev
);
719 pm_runtime_disable(vdev
->device
);
720 vfio_platform_put_reset(vdev
);
721 vfio_iommu_group_put(dev
->iommu_group
, dev
);
726 EXPORT_SYMBOL_GPL(vfio_platform_remove_common
);
728 void __vfio_platform_register_reset(struct vfio_platform_reset_node
*node
)
730 mutex_lock(&driver_lock
);
731 list_add(&node
->link
, &reset_list
);
732 mutex_unlock(&driver_lock
);
734 EXPORT_SYMBOL_GPL(__vfio_platform_register_reset
);
736 void vfio_platform_unregister_reset(const char *compat
,
737 vfio_platform_reset_fn_t fn
)
739 struct vfio_platform_reset_node
*iter
, *temp
;
741 mutex_lock(&driver_lock
);
742 list_for_each_entry_safe(iter
, temp
, &reset_list
, link
) {
743 if (!strcmp(iter
->compat
, compat
) && (iter
->of_reset
== fn
)) {
744 list_del(&iter
->link
);
749 mutex_unlock(&driver_lock
);
752 EXPORT_SYMBOL_GPL(vfio_platform_unregister_reset
);
754 MODULE_VERSION(DRIVER_VERSION
);
755 MODULE_LICENSE("GPL v2");
756 MODULE_AUTHOR(DRIVER_AUTHOR
);
757 MODULE_DESCRIPTION(DRIVER_DESC
);