/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
15 #include <linux/device.h>
16 #include <linux/acpi.h>
17 #include <linux/iommu.h>
18 #include <linux/module.h>
19 #include <linux/mutex.h>
20 #include <linux/slab.h>
21 #include <linux/types.h>
22 #include <linux/uaccess.h>
23 #include <linux/vfio.h>
25 #include "vfio_platform_private.h"
#define DRIVER_VERSION  "0.10"
#define DRIVER_AUTHOR   "Antonios Motakis <a.motakis@virtualopensystems.com>"
#define DRIVER_DESC     "VFIO platform base module"

/* A device is ACPI-described iff the probe stored an ACPI HID for it. */
#define VFIO_PLATFORM_IS_ACPI(vdev) ((vdev)->acpihid != NULL)

/* Registered reset handlers, protected by driver_lock. */
static LIST_HEAD(reset_list);
/* Serializes reset_list access and open/release refcounting. */
static DEFINE_MUTEX(driver_lock);
36 static vfio_platform_reset_fn_t
vfio_platform_lookup_reset(const char *compat
,
37 struct module
**module
)
39 struct vfio_platform_reset_node
*iter
;
40 vfio_platform_reset_fn_t reset_fn
= NULL
;
42 mutex_lock(&driver_lock
);
43 list_for_each_entry(iter
, &reset_list
, link
) {
44 if (!strcmp(iter
->compat
, compat
) &&
45 try_module_get(iter
->owner
)) {
46 *module
= iter
->owner
;
47 reset_fn
= iter
->of_reset
;
51 mutex_unlock(&driver_lock
);
55 static int vfio_platform_acpi_probe(struct vfio_platform_device
*vdev
,
58 struct acpi_device
*adev
;
63 adev
= ACPI_COMPANION(dev
);
65 pr_err("VFIO: ACPI companion device not found for %s\n",
71 vdev
->acpihid
= acpi_device_hid(adev
);
73 return WARN_ON(!vdev
->acpihid
) ? -EINVAL
: 0;
76 static int vfio_platform_acpi_call_reset(struct vfio_platform_device
*vdev
,
77 const char **extra_dbg
)
80 struct acpi_buffer buffer
= { ACPI_ALLOCATE_BUFFER
, NULL
};
81 struct device
*dev
= vdev
->device
;
82 acpi_handle handle
= ACPI_HANDLE(dev
);
85 acpi_ret
= acpi_evaluate_object(handle
, "_RST", NULL
, &buffer
);
86 if (ACPI_FAILURE(acpi_ret
)) {
88 *extra_dbg
= acpi_format_exception(acpi_ret
);
98 static bool vfio_platform_acpi_has_reset(struct vfio_platform_device
*vdev
)
101 struct device
*dev
= vdev
->device
;
102 acpi_handle handle
= ACPI_HANDLE(dev
);
104 return acpi_has_method(handle
, "_RST");
110 static bool vfio_platform_has_reset(struct vfio_platform_device
*vdev
)
112 if (VFIO_PLATFORM_IS_ACPI(vdev
))
113 return vfio_platform_acpi_has_reset(vdev
);
115 return vdev
->of_reset
? true : false;
118 static int vfio_platform_get_reset(struct vfio_platform_device
*vdev
)
120 if (VFIO_PLATFORM_IS_ACPI(vdev
))
121 return vfio_platform_acpi_has_reset(vdev
) ? 0 : -ENOENT
;
123 vdev
->of_reset
= vfio_platform_lookup_reset(vdev
->compat
,
124 &vdev
->reset_module
);
125 if (!vdev
->of_reset
) {
126 request_module("vfio-reset:%s", vdev
->compat
);
127 vdev
->of_reset
= vfio_platform_lookup_reset(vdev
->compat
,
128 &vdev
->reset_module
);
131 return vdev
->of_reset
? 0 : -ENOENT
;
134 static void vfio_platform_put_reset(struct vfio_platform_device
*vdev
)
136 if (VFIO_PLATFORM_IS_ACPI(vdev
))
140 module_put(vdev
->reset_module
);
143 static int vfio_platform_regions_init(struct vfio_platform_device
*vdev
)
147 while (vdev
->get_resource(vdev
, cnt
))
150 vdev
->regions
= kcalloc(cnt
, sizeof(struct vfio_platform_region
),
155 for (i
= 0; i
< cnt
; i
++) {
156 struct resource
*res
=
157 vdev
->get_resource(vdev
, i
);
162 vdev
->regions
[i
].addr
= res
->start
;
163 vdev
->regions
[i
].size
= resource_size(res
);
164 vdev
->regions
[i
].flags
= 0;
166 switch (resource_type(res
)) {
168 vdev
->regions
[i
].type
= VFIO_PLATFORM_REGION_TYPE_MMIO
;
169 vdev
->regions
[i
].flags
|= VFIO_REGION_INFO_FLAG_READ
;
170 if (!(res
->flags
& IORESOURCE_READONLY
))
171 vdev
->regions
[i
].flags
|=
172 VFIO_REGION_INFO_FLAG_WRITE
;
175 * Only regions addressed with PAGE granularity may be
178 if (!(vdev
->regions
[i
].addr
& ~PAGE_MASK
) &&
179 !(vdev
->regions
[i
].size
& ~PAGE_MASK
))
180 vdev
->regions
[i
].flags
|=
181 VFIO_REGION_INFO_FLAG_MMAP
;
185 vdev
->regions
[i
].type
= VFIO_PLATFORM_REGION_TYPE_PIO
;
192 vdev
->num_regions
= cnt
;
196 kfree(vdev
->regions
);
200 static void vfio_platform_regions_cleanup(struct vfio_platform_device
*vdev
)
204 for (i
= 0; i
< vdev
->num_regions
; i
++)
205 iounmap(vdev
->regions
[i
].ioaddr
);
207 vdev
->num_regions
= 0;
208 kfree(vdev
->regions
);
211 static int vfio_platform_call_reset(struct vfio_platform_device
*vdev
,
212 const char **extra_dbg
)
214 if (VFIO_PLATFORM_IS_ACPI(vdev
)) {
215 dev_info(vdev
->device
, "reset\n");
216 return vfio_platform_acpi_call_reset(vdev
, extra_dbg
);
217 } else if (vdev
->of_reset
) {
218 dev_info(vdev
->device
, "reset\n");
219 return vdev
->of_reset(vdev
);
222 dev_warn(vdev
->device
, "no reset function found!\n");
226 static void vfio_platform_release(void *device_data
)
228 struct vfio_platform_device
*vdev
= device_data
;
230 mutex_lock(&driver_lock
);
232 if (!(--vdev
->refcnt
)) {
233 const char *extra_dbg
= NULL
;
236 ret
= vfio_platform_call_reset(vdev
, &extra_dbg
);
237 if (ret
&& vdev
->reset_required
) {
238 dev_warn(vdev
->device
, "reset driver is required and reset call failed in release (%d) %s\n",
239 ret
, extra_dbg
? extra_dbg
: "");
242 vfio_platform_regions_cleanup(vdev
);
243 vfio_platform_irq_cleanup(vdev
);
246 mutex_unlock(&driver_lock
);
248 module_put(vdev
->parent_module
);
251 static int vfio_platform_open(void *device_data
)
253 struct vfio_platform_device
*vdev
= device_data
;
256 if (!try_module_get(vdev
->parent_module
))
259 mutex_lock(&driver_lock
);
262 const char *extra_dbg
= NULL
;
264 ret
= vfio_platform_regions_init(vdev
);
268 ret
= vfio_platform_irq_init(vdev
);
272 ret
= vfio_platform_call_reset(vdev
, &extra_dbg
);
273 if (ret
&& vdev
->reset_required
) {
274 dev_warn(vdev
->device
, "reset driver is required and reset call failed in open (%d) %s\n",
275 ret
, extra_dbg
? extra_dbg
: "");
282 mutex_unlock(&driver_lock
);
286 vfio_platform_irq_cleanup(vdev
);
288 vfio_platform_regions_cleanup(vdev
);
290 mutex_unlock(&driver_lock
);
291 module_put(THIS_MODULE
);
295 static long vfio_platform_ioctl(void *device_data
,
296 unsigned int cmd
, unsigned long arg
)
298 struct vfio_platform_device
*vdev
= device_data
;
301 if (cmd
== VFIO_DEVICE_GET_INFO
) {
302 struct vfio_device_info info
;
304 minsz
= offsetofend(struct vfio_device_info
, num_irqs
);
306 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
309 if (info
.argsz
< minsz
)
312 if (vfio_platform_has_reset(vdev
))
313 vdev
->flags
|= VFIO_DEVICE_FLAGS_RESET
;
314 info
.flags
= vdev
->flags
;
315 info
.num_regions
= vdev
->num_regions
;
316 info
.num_irqs
= vdev
->num_irqs
;
318 return copy_to_user((void __user
*)arg
, &info
, minsz
) ?
321 } else if (cmd
== VFIO_DEVICE_GET_REGION_INFO
) {
322 struct vfio_region_info info
;
324 minsz
= offsetofend(struct vfio_region_info
, offset
);
326 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
329 if (info
.argsz
< minsz
)
332 if (info
.index
>= vdev
->num_regions
)
335 /* map offset to the physical address */
336 info
.offset
= VFIO_PLATFORM_INDEX_TO_OFFSET(info
.index
);
337 info
.size
= vdev
->regions
[info
.index
].size
;
338 info
.flags
= vdev
->regions
[info
.index
].flags
;
340 return copy_to_user((void __user
*)arg
, &info
, minsz
) ?
343 } else if (cmd
== VFIO_DEVICE_GET_IRQ_INFO
) {
344 struct vfio_irq_info info
;
346 minsz
= offsetofend(struct vfio_irq_info
, count
);
348 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
351 if (info
.argsz
< minsz
)
354 if (info
.index
>= vdev
->num_irqs
)
357 info
.flags
= vdev
->irqs
[info
.index
].flags
;
358 info
.count
= vdev
->irqs
[info
.index
].count
;
360 return copy_to_user((void __user
*)arg
, &info
, minsz
) ?
363 } else if (cmd
== VFIO_DEVICE_SET_IRQS
) {
364 struct vfio_irq_set hdr
;
367 size_t data_size
= 0;
369 minsz
= offsetofend(struct vfio_irq_set
, count
);
371 if (copy_from_user(&hdr
, (void __user
*)arg
, minsz
))
374 ret
= vfio_set_irqs_validate_and_prepare(&hdr
, vdev
->num_irqs
,
375 vdev
->num_irqs
, &data_size
);
380 data
= memdup_user((void __user
*)(arg
+ minsz
),
383 return PTR_ERR(data
);
386 mutex_lock(&vdev
->igate
);
388 ret
= vfio_platform_set_irqs_ioctl(vdev
, hdr
.flags
, hdr
.index
,
389 hdr
.start
, hdr
.count
, data
);
390 mutex_unlock(&vdev
->igate
);
395 } else if (cmd
== VFIO_DEVICE_RESET
) {
396 return vfio_platform_call_reset(vdev
, NULL
);
402 static ssize_t
vfio_platform_read_mmio(struct vfio_platform_region
*reg
,
403 char __user
*buf
, size_t count
,
406 unsigned int done
= 0;
410 ioremap_nocache(reg
->addr
, reg
->size
);
419 if (count
>= 4 && !(off
% 4)) {
422 val
= ioread32(reg
->ioaddr
+ off
);
423 if (copy_to_user(buf
, &val
, 4))
427 } else if (count
>= 2 && !(off
% 2)) {
430 val
= ioread16(reg
->ioaddr
+ off
);
431 if (copy_to_user(buf
, &val
, 2))
438 val
= ioread8(reg
->ioaddr
+ off
);
439 if (copy_to_user(buf
, &val
, 1))
457 static ssize_t
vfio_platform_read(void *device_data
, char __user
*buf
,
458 size_t count
, loff_t
*ppos
)
460 struct vfio_platform_device
*vdev
= device_data
;
461 unsigned int index
= VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos
);
462 loff_t off
= *ppos
& VFIO_PLATFORM_OFFSET_MASK
;
464 if (index
>= vdev
->num_regions
)
467 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_READ
))
470 if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_MMIO
)
471 return vfio_platform_read_mmio(&vdev
->regions
[index
],
473 else if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_PIO
)
474 return -EINVAL
; /* not implemented */
479 static ssize_t
vfio_platform_write_mmio(struct vfio_platform_region
*reg
,
480 const char __user
*buf
, size_t count
,
483 unsigned int done
= 0;
487 ioremap_nocache(reg
->addr
, reg
->size
);
496 if (count
>= 4 && !(off
% 4)) {
499 if (copy_from_user(&val
, buf
, 4))
501 iowrite32(val
, reg
->ioaddr
+ off
);
504 } else if (count
>= 2 && !(off
% 2)) {
507 if (copy_from_user(&val
, buf
, 2))
509 iowrite16(val
, reg
->ioaddr
+ off
);
515 if (copy_from_user(&val
, buf
, 1))
517 iowrite8(val
, reg
->ioaddr
+ off
);
533 static ssize_t
vfio_platform_write(void *device_data
, const char __user
*buf
,
534 size_t count
, loff_t
*ppos
)
536 struct vfio_platform_device
*vdev
= device_data
;
537 unsigned int index
= VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos
);
538 loff_t off
= *ppos
& VFIO_PLATFORM_OFFSET_MASK
;
540 if (index
>= vdev
->num_regions
)
543 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_WRITE
))
546 if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_MMIO
)
547 return vfio_platform_write_mmio(&vdev
->regions
[index
],
549 else if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_PIO
)
550 return -EINVAL
; /* not implemented */
555 static int vfio_platform_mmap_mmio(struct vfio_platform_region region
,
556 struct vm_area_struct
*vma
)
558 u64 req_len
, pgoff
, req_start
;
560 req_len
= vma
->vm_end
- vma
->vm_start
;
561 pgoff
= vma
->vm_pgoff
&
562 ((1U << (VFIO_PLATFORM_OFFSET_SHIFT
- PAGE_SHIFT
)) - 1);
563 req_start
= pgoff
<< PAGE_SHIFT
;
565 if (region
.size
< PAGE_SIZE
|| req_start
+ req_len
> region
.size
)
568 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
569 vma
->vm_pgoff
= (region
.addr
>> PAGE_SHIFT
) + pgoff
;
571 return remap_pfn_range(vma
, vma
->vm_start
, vma
->vm_pgoff
,
572 req_len
, vma
->vm_page_prot
);
575 static int vfio_platform_mmap(void *device_data
, struct vm_area_struct
*vma
)
577 struct vfio_platform_device
*vdev
= device_data
;
580 index
= vma
->vm_pgoff
>> (VFIO_PLATFORM_OFFSET_SHIFT
- PAGE_SHIFT
);
582 if (vma
->vm_end
< vma
->vm_start
)
584 if (!(vma
->vm_flags
& VM_SHARED
))
586 if (index
>= vdev
->num_regions
)
588 if (vma
->vm_start
& ~PAGE_MASK
)
590 if (vma
->vm_end
& ~PAGE_MASK
)
593 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_MMAP
))
596 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_READ
)
597 && (vma
->vm_flags
& VM_READ
))
600 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_WRITE
)
601 && (vma
->vm_flags
& VM_WRITE
))
604 vma
->vm_private_data
= vdev
;
606 if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_MMIO
)
607 return vfio_platform_mmap_mmio(vdev
->regions
[index
], vma
);
609 else if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_PIO
)
610 return -EINVAL
; /* not implemented */
615 static const struct vfio_device_ops vfio_platform_ops
= {
616 .name
= "vfio-platform",
617 .open
= vfio_platform_open
,
618 .release
= vfio_platform_release
,
619 .ioctl
= vfio_platform_ioctl
,
620 .read
= vfio_platform_read
,
621 .write
= vfio_platform_write
,
622 .mmap
= vfio_platform_mmap
,
625 static int vfio_platform_of_probe(struct vfio_platform_device
*vdev
,
630 ret
= device_property_read_string(dev
, "compatible",
633 pr_err("VFIO: cannot retrieve compat for %s\n",
/*
 * There can be two kernel build combinations. One build where
 * ACPI is not selected in Kconfig and another one with the ACPI Kconfig.
 *
 * In the first case, vfio_platform_acpi_probe will return since
 * acpi_disabled is 1. DT user will not see any kind of messages from
 * ACPI.
 *
 * In the second case, both DT and ACPI is compiled in but the system is
 * booting with any of these combinations.
 *
 * If the firmware is DT type, then acpi_disabled is 1. The ACPI probe routine
 * terminates immediately without any messages.
 *
 * If the firmware is ACPI type, then acpi_disabled is 0. All other checks are
 * valid checks. We cannot claim that this system is DT.
 */
656 int vfio_platform_probe_common(struct vfio_platform_device
*vdev
,
659 struct iommu_group
*group
;
665 ret
= vfio_platform_acpi_probe(vdev
, dev
);
667 ret
= vfio_platform_of_probe(vdev
, dev
);
674 ret
= vfio_platform_get_reset(vdev
);
675 if (ret
&& vdev
->reset_required
) {
676 pr_err("vfio: no reset function found for device %s\n",
681 group
= vfio_iommu_group_get(dev
);
683 pr_err("VFIO: No IOMMU group for device %s\n", vdev
->name
);
687 ret
= vfio_add_group_dev(dev
, &vfio_platform_ops
, vdev
);
689 vfio_iommu_group_put(group
, dev
);
693 mutex_init(&vdev
->igate
);
697 EXPORT_SYMBOL_GPL(vfio_platform_probe_common
);
699 struct vfio_platform_device
*vfio_platform_remove_common(struct device
*dev
)
701 struct vfio_platform_device
*vdev
;
703 vdev
= vfio_del_group_dev(dev
);
706 vfio_platform_put_reset(vdev
);
707 vfio_iommu_group_put(dev
->iommu_group
, dev
);
712 EXPORT_SYMBOL_GPL(vfio_platform_remove_common
);
714 void __vfio_platform_register_reset(struct vfio_platform_reset_node
*node
)
716 mutex_lock(&driver_lock
);
717 list_add(&node
->link
, &reset_list
);
718 mutex_unlock(&driver_lock
);
720 EXPORT_SYMBOL_GPL(__vfio_platform_register_reset
);
722 void vfio_platform_unregister_reset(const char *compat
,
723 vfio_platform_reset_fn_t fn
)
725 struct vfio_platform_reset_node
*iter
, *temp
;
727 mutex_lock(&driver_lock
);
728 list_for_each_entry_safe(iter
, temp
, &reset_list
, link
) {
729 if (!strcmp(iter
->compat
, compat
) && (iter
->of_reset
== fn
)) {
730 list_del(&iter
->link
);
735 mutex_unlock(&driver_lock
);
738 EXPORT_SYMBOL_GPL(vfio_platform_unregister_reset
);
740 MODULE_VERSION(DRIVER_VERSION
);
741 MODULE_LICENSE("GPL v2");
742 MODULE_AUTHOR(DRIVER_AUTHOR
);
743 MODULE_DESCRIPTION(DRIVER_DESC
);