1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 */
7 #define dev_fmt(fmt) "VFIO: " fmt
9 #include <linux/device.h>
10 #include <linux/acpi.h>
11 #include <linux/iommu.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/slab.h>
16 #include <linux/types.h>
17 #include <linux/uaccess.h>
18 #include <linux/vfio.h>
20 #include "vfio_platform_private.h"
/* Module identity strings reported via MODULE_* macros at end of file. */
#define DRIVER_VERSION  "0.10"
#define DRIVER_AUTHOR   "Antonios Motakis <a.motakis@virtualopensystems.com>"
#define DRIVER_DESC     "VFIO platform base module"

/* True when the device was matched via ACPI (acpihid set in acpi_probe). */
#define VFIO_PLATFORM_IS_ACPI(vdev) ((vdev)->acpihid != NULL)
/*
 * Registry of vendor-specific reset handlers. Entries are added/removed by
 * __vfio_platform_register_reset()/vfio_platform_unregister_reset() and
 * looked up by compatible string; driver_lock protects the list (and is
 * also used to serialize open/release refcounting below).
 */
static LIST_HEAD(reset_list);
static DEFINE_MUTEX(driver_lock);
31 static vfio_platform_reset_fn_t
vfio_platform_lookup_reset(const char *compat
,
32 struct module
**module
)
34 struct vfio_platform_reset_node
*iter
;
35 vfio_platform_reset_fn_t reset_fn
= NULL
;
37 mutex_lock(&driver_lock
);
38 list_for_each_entry(iter
, &reset_list
, link
) {
39 if (!strcmp(iter
->compat
, compat
) &&
40 try_module_get(iter
->owner
)) {
41 *module
= iter
->owner
;
42 reset_fn
= iter
->of_reset
;
46 mutex_unlock(&driver_lock
);
50 static int vfio_platform_acpi_probe(struct vfio_platform_device
*vdev
,
53 struct acpi_device
*adev
;
58 adev
= ACPI_COMPANION(dev
);
60 dev_err(dev
, "ACPI companion device not found for %s\n",
66 vdev
->acpihid
= acpi_device_hid(adev
);
68 return WARN_ON(!vdev
->acpihid
) ? -EINVAL
: 0;
71 static int vfio_platform_acpi_call_reset(struct vfio_platform_device
*vdev
,
72 const char **extra_dbg
)
75 struct acpi_buffer buffer
= { ACPI_ALLOCATE_BUFFER
, NULL
};
76 struct device
*dev
= vdev
->device
;
77 acpi_handle handle
= ACPI_HANDLE(dev
);
80 acpi_ret
= acpi_evaluate_object(handle
, "_RST", NULL
, &buffer
);
81 if (ACPI_FAILURE(acpi_ret
)) {
83 *extra_dbg
= acpi_format_exception(acpi_ret
);
93 static bool vfio_platform_acpi_has_reset(struct vfio_platform_device
*vdev
)
96 struct device
*dev
= vdev
->device
;
97 acpi_handle handle
= ACPI_HANDLE(dev
);
99 return acpi_has_method(handle
, "_RST");
105 static bool vfio_platform_has_reset(struct vfio_platform_device
*vdev
)
107 if (VFIO_PLATFORM_IS_ACPI(vdev
))
108 return vfio_platform_acpi_has_reset(vdev
);
110 return vdev
->of_reset
? true : false;
113 static int vfio_platform_get_reset(struct vfio_platform_device
*vdev
)
115 if (VFIO_PLATFORM_IS_ACPI(vdev
))
116 return vfio_platform_acpi_has_reset(vdev
) ? 0 : -ENOENT
;
118 vdev
->of_reset
= vfio_platform_lookup_reset(vdev
->compat
,
119 &vdev
->reset_module
);
120 if (!vdev
->of_reset
) {
121 request_module("vfio-reset:%s", vdev
->compat
);
122 vdev
->of_reset
= vfio_platform_lookup_reset(vdev
->compat
,
123 &vdev
->reset_module
);
126 return vdev
->of_reset
? 0 : -ENOENT
;
129 static void vfio_platform_put_reset(struct vfio_platform_device
*vdev
)
131 if (VFIO_PLATFORM_IS_ACPI(vdev
))
135 module_put(vdev
->reset_module
);
138 static int vfio_platform_regions_init(struct vfio_platform_device
*vdev
)
142 while (vdev
->get_resource(vdev
, cnt
))
145 vdev
->regions
= kcalloc(cnt
, sizeof(struct vfio_platform_region
),
150 for (i
= 0; i
< cnt
; i
++) {
151 struct resource
*res
=
152 vdev
->get_resource(vdev
, i
);
157 vdev
->regions
[i
].addr
= res
->start
;
158 vdev
->regions
[i
].size
= resource_size(res
);
159 vdev
->regions
[i
].flags
= 0;
161 switch (resource_type(res
)) {
163 vdev
->regions
[i
].type
= VFIO_PLATFORM_REGION_TYPE_MMIO
;
164 vdev
->regions
[i
].flags
|= VFIO_REGION_INFO_FLAG_READ
;
165 if (!(res
->flags
& IORESOURCE_READONLY
))
166 vdev
->regions
[i
].flags
|=
167 VFIO_REGION_INFO_FLAG_WRITE
;
170 * Only regions addressed with PAGE granularity may be
173 if (!(vdev
->regions
[i
].addr
& ~PAGE_MASK
) &&
174 !(vdev
->regions
[i
].size
& ~PAGE_MASK
))
175 vdev
->regions
[i
].flags
|=
176 VFIO_REGION_INFO_FLAG_MMAP
;
180 vdev
->regions
[i
].type
= VFIO_PLATFORM_REGION_TYPE_PIO
;
187 vdev
->num_regions
= cnt
;
191 kfree(vdev
->regions
);
195 static void vfio_platform_regions_cleanup(struct vfio_platform_device
*vdev
)
199 for (i
= 0; i
< vdev
->num_regions
; i
++)
200 iounmap(vdev
->regions
[i
].ioaddr
);
202 vdev
->num_regions
= 0;
203 kfree(vdev
->regions
);
206 static int vfio_platform_call_reset(struct vfio_platform_device
*vdev
,
207 const char **extra_dbg
)
209 if (VFIO_PLATFORM_IS_ACPI(vdev
)) {
210 dev_info(vdev
->device
, "reset\n");
211 return vfio_platform_acpi_call_reset(vdev
, extra_dbg
);
212 } else if (vdev
->of_reset
) {
213 dev_info(vdev
->device
, "reset\n");
214 return vdev
->of_reset(vdev
);
217 dev_warn(vdev
->device
, "no reset function found!\n");
221 static void vfio_platform_release(void *device_data
)
223 struct vfio_platform_device
*vdev
= device_data
;
225 mutex_lock(&driver_lock
);
227 if (!(--vdev
->refcnt
)) {
228 const char *extra_dbg
= NULL
;
231 ret
= vfio_platform_call_reset(vdev
, &extra_dbg
);
232 if (ret
&& vdev
->reset_required
) {
233 dev_warn(vdev
->device
, "reset driver is required and reset call failed in release (%d) %s\n",
234 ret
, extra_dbg
? extra_dbg
: "");
237 pm_runtime_put(vdev
->device
);
238 vfio_platform_regions_cleanup(vdev
);
239 vfio_platform_irq_cleanup(vdev
);
242 mutex_unlock(&driver_lock
);
244 module_put(vdev
->parent_module
);
247 static int vfio_platform_open(void *device_data
)
249 struct vfio_platform_device
*vdev
= device_data
;
252 if (!try_module_get(vdev
->parent_module
))
255 mutex_lock(&driver_lock
);
258 const char *extra_dbg
= NULL
;
260 ret
= vfio_platform_regions_init(vdev
);
264 ret
= vfio_platform_irq_init(vdev
);
268 ret
= pm_runtime_get_sync(vdev
->device
);
272 ret
= vfio_platform_call_reset(vdev
, &extra_dbg
);
273 if (ret
&& vdev
->reset_required
) {
274 dev_warn(vdev
->device
, "reset driver is required and reset call failed in open (%d) %s\n",
275 ret
, extra_dbg
? extra_dbg
: "");
282 mutex_unlock(&driver_lock
);
286 pm_runtime_put(vdev
->device
);
287 vfio_platform_irq_cleanup(vdev
);
289 vfio_platform_regions_cleanup(vdev
);
291 mutex_unlock(&driver_lock
);
292 module_put(THIS_MODULE
);
296 static long vfio_platform_ioctl(void *device_data
,
297 unsigned int cmd
, unsigned long arg
)
299 struct vfio_platform_device
*vdev
= device_data
;
302 if (cmd
== VFIO_DEVICE_GET_INFO
) {
303 struct vfio_device_info info
;
305 minsz
= offsetofend(struct vfio_device_info
, num_irqs
);
307 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
310 if (info
.argsz
< minsz
)
313 if (vfio_platform_has_reset(vdev
))
314 vdev
->flags
|= VFIO_DEVICE_FLAGS_RESET
;
315 info
.flags
= vdev
->flags
;
316 info
.num_regions
= vdev
->num_regions
;
317 info
.num_irqs
= vdev
->num_irqs
;
319 return copy_to_user((void __user
*)arg
, &info
, minsz
) ?
322 } else if (cmd
== VFIO_DEVICE_GET_REGION_INFO
) {
323 struct vfio_region_info info
;
325 minsz
= offsetofend(struct vfio_region_info
, offset
);
327 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
330 if (info
.argsz
< minsz
)
333 if (info
.index
>= vdev
->num_regions
)
336 /* map offset to the physical address */
337 info
.offset
= VFIO_PLATFORM_INDEX_TO_OFFSET(info
.index
);
338 info
.size
= vdev
->regions
[info
.index
].size
;
339 info
.flags
= vdev
->regions
[info
.index
].flags
;
341 return copy_to_user((void __user
*)arg
, &info
, minsz
) ?
344 } else if (cmd
== VFIO_DEVICE_GET_IRQ_INFO
) {
345 struct vfio_irq_info info
;
347 minsz
= offsetofend(struct vfio_irq_info
, count
);
349 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
352 if (info
.argsz
< minsz
)
355 if (info
.index
>= vdev
->num_irqs
)
358 info
.flags
= vdev
->irqs
[info
.index
].flags
;
359 info
.count
= vdev
->irqs
[info
.index
].count
;
361 return copy_to_user((void __user
*)arg
, &info
, minsz
) ?
364 } else if (cmd
== VFIO_DEVICE_SET_IRQS
) {
365 struct vfio_irq_set hdr
;
368 size_t data_size
= 0;
370 minsz
= offsetofend(struct vfio_irq_set
, count
);
372 if (copy_from_user(&hdr
, (void __user
*)arg
, minsz
))
375 ret
= vfio_set_irqs_validate_and_prepare(&hdr
, vdev
->num_irqs
,
376 vdev
->num_irqs
, &data_size
);
381 data
= memdup_user((void __user
*)(arg
+ minsz
),
384 return PTR_ERR(data
);
387 mutex_lock(&vdev
->igate
);
389 ret
= vfio_platform_set_irqs_ioctl(vdev
, hdr
.flags
, hdr
.index
,
390 hdr
.start
, hdr
.count
, data
);
391 mutex_unlock(&vdev
->igate
);
396 } else if (cmd
== VFIO_DEVICE_RESET
) {
397 return vfio_platform_call_reset(vdev
, NULL
);
403 static ssize_t
vfio_platform_read_mmio(struct vfio_platform_region
*reg
,
404 char __user
*buf
, size_t count
,
407 unsigned int done
= 0;
411 ioremap(reg
->addr
, reg
->size
);
420 if (count
>= 4 && !(off
% 4)) {
423 val
= ioread32(reg
->ioaddr
+ off
);
424 if (copy_to_user(buf
, &val
, 4))
428 } else if (count
>= 2 && !(off
% 2)) {
431 val
= ioread16(reg
->ioaddr
+ off
);
432 if (copy_to_user(buf
, &val
, 2))
439 val
= ioread8(reg
->ioaddr
+ off
);
440 if (copy_to_user(buf
, &val
, 1))
458 static ssize_t
vfio_platform_read(void *device_data
, char __user
*buf
,
459 size_t count
, loff_t
*ppos
)
461 struct vfio_platform_device
*vdev
= device_data
;
462 unsigned int index
= VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos
);
463 loff_t off
= *ppos
& VFIO_PLATFORM_OFFSET_MASK
;
465 if (index
>= vdev
->num_regions
)
468 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_READ
))
471 if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_MMIO
)
472 return vfio_platform_read_mmio(&vdev
->regions
[index
],
474 else if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_PIO
)
475 return -EINVAL
; /* not implemented */
480 static ssize_t
vfio_platform_write_mmio(struct vfio_platform_region
*reg
,
481 const char __user
*buf
, size_t count
,
484 unsigned int done
= 0;
488 ioremap(reg
->addr
, reg
->size
);
497 if (count
>= 4 && !(off
% 4)) {
500 if (copy_from_user(&val
, buf
, 4))
502 iowrite32(val
, reg
->ioaddr
+ off
);
505 } else if (count
>= 2 && !(off
% 2)) {
508 if (copy_from_user(&val
, buf
, 2))
510 iowrite16(val
, reg
->ioaddr
+ off
);
516 if (copy_from_user(&val
, buf
, 1))
518 iowrite8(val
, reg
->ioaddr
+ off
);
534 static ssize_t
vfio_platform_write(void *device_data
, const char __user
*buf
,
535 size_t count
, loff_t
*ppos
)
537 struct vfio_platform_device
*vdev
= device_data
;
538 unsigned int index
= VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos
);
539 loff_t off
= *ppos
& VFIO_PLATFORM_OFFSET_MASK
;
541 if (index
>= vdev
->num_regions
)
544 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_WRITE
))
547 if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_MMIO
)
548 return vfio_platform_write_mmio(&vdev
->regions
[index
],
550 else if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_PIO
)
551 return -EINVAL
; /* not implemented */
556 static int vfio_platform_mmap_mmio(struct vfio_platform_region region
,
557 struct vm_area_struct
*vma
)
559 u64 req_len
, pgoff
, req_start
;
561 req_len
= vma
->vm_end
- vma
->vm_start
;
562 pgoff
= vma
->vm_pgoff
&
563 ((1U << (VFIO_PLATFORM_OFFSET_SHIFT
- PAGE_SHIFT
)) - 1);
564 req_start
= pgoff
<< PAGE_SHIFT
;
566 if (region
.size
< PAGE_SIZE
|| req_start
+ req_len
> region
.size
)
569 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
570 vma
->vm_pgoff
= (region
.addr
>> PAGE_SHIFT
) + pgoff
;
572 return remap_pfn_range(vma
, vma
->vm_start
, vma
->vm_pgoff
,
573 req_len
, vma
->vm_page_prot
);
576 static int vfio_platform_mmap(void *device_data
, struct vm_area_struct
*vma
)
578 struct vfio_platform_device
*vdev
= device_data
;
581 index
= vma
->vm_pgoff
>> (VFIO_PLATFORM_OFFSET_SHIFT
- PAGE_SHIFT
);
583 if (vma
->vm_end
< vma
->vm_start
)
585 if (!(vma
->vm_flags
& VM_SHARED
))
587 if (index
>= vdev
->num_regions
)
589 if (vma
->vm_start
& ~PAGE_MASK
)
591 if (vma
->vm_end
& ~PAGE_MASK
)
594 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_MMAP
))
597 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_READ
)
598 && (vma
->vm_flags
& VM_READ
))
601 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_WRITE
)
602 && (vma
->vm_flags
& VM_WRITE
))
605 vma
->vm_private_data
= vdev
;
607 if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_MMIO
)
608 return vfio_platform_mmap_mmio(vdev
->regions
[index
], vma
);
610 else if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_PIO
)
611 return -EINVAL
; /* not implemented */
616 static const struct vfio_device_ops vfio_platform_ops
= {
617 .name
= "vfio-platform",
618 .open
= vfio_platform_open
,
619 .release
= vfio_platform_release
,
620 .ioctl
= vfio_platform_ioctl
,
621 .read
= vfio_platform_read
,
622 .write
= vfio_platform_write
,
623 .mmap
= vfio_platform_mmap
,
626 static int vfio_platform_of_probe(struct vfio_platform_device
*vdev
,
631 ret
= device_property_read_string(dev
, "compatible",
634 dev_err(dev
, "Cannot retrieve compat for %s\n", vdev
->name
);
/*
 * There can be two kernel build combinations. One build where
 * ACPI is not selected in Kconfig and another one with the ACPI Kconfig.
 *
 * In the first case, vfio_platform_acpi_probe will return since
 * acpi_disabled is 1. DT user will not see any kind of messages from
 * ACPI.
 *
 * In the second case, both DT and ACPI is compiled in but the system is
 * booting with any of these combinations.
 *
 * If the firmware is DT type, then acpi_disabled is 1. The ACPI probe routine
 * terminates immediately without any messages.
 *
 * If the firmware is ACPI type, then acpi_disabled is 0. All other checks are
 * valid checks. We cannot claim that this system is DT.
 */
656 int vfio_platform_probe_common(struct vfio_platform_device
*vdev
,
659 struct iommu_group
*group
;
665 ret
= vfio_platform_acpi_probe(vdev
, dev
);
667 ret
= vfio_platform_of_probe(vdev
, dev
);
674 ret
= vfio_platform_get_reset(vdev
);
675 if (ret
&& vdev
->reset_required
) {
676 dev_err(dev
, "No reset function found for device %s\n",
681 group
= vfio_iommu_group_get(dev
);
683 dev_err(dev
, "No IOMMU group for device %s\n", vdev
->name
);
688 ret
= vfio_add_group_dev(dev
, &vfio_platform_ops
, vdev
);
692 mutex_init(&vdev
->igate
);
694 pm_runtime_enable(vdev
->device
);
698 vfio_iommu_group_put(group
, dev
);
700 vfio_platform_put_reset(vdev
);
703 EXPORT_SYMBOL_GPL(vfio_platform_probe_common
);
705 struct vfio_platform_device
*vfio_platform_remove_common(struct device
*dev
)
707 struct vfio_platform_device
*vdev
;
709 vdev
= vfio_del_group_dev(dev
);
712 pm_runtime_disable(vdev
->device
);
713 vfio_platform_put_reset(vdev
);
714 vfio_iommu_group_put(dev
->iommu_group
, dev
);
719 EXPORT_SYMBOL_GPL(vfio_platform_remove_common
);
721 void __vfio_platform_register_reset(struct vfio_platform_reset_node
*node
)
723 mutex_lock(&driver_lock
);
724 list_add(&node
->link
, &reset_list
);
725 mutex_unlock(&driver_lock
);
727 EXPORT_SYMBOL_GPL(__vfio_platform_register_reset
);
729 void vfio_platform_unregister_reset(const char *compat
,
730 vfio_platform_reset_fn_t fn
)
732 struct vfio_platform_reset_node
*iter
, *temp
;
734 mutex_lock(&driver_lock
);
735 list_for_each_entry_safe(iter
, temp
, &reset_list
, link
) {
736 if (!strcmp(iter
->compat
, compat
) && (iter
->of_reset
== fn
)) {
737 list_del(&iter
->link
);
742 mutex_unlock(&driver_lock
);
745 EXPORT_SYMBOL_GPL(vfio_platform_unregister_reset
);
/* Module metadata; the DRIVER_* strings are defined at the top of the file. */
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);