1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 */
7 #define dev_fmt(fmt) "VFIO: " fmt
9 #include <linux/device.h>
10 #include <linux/acpi.h>
11 #include <linux/iommu.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/slab.h>
16 #include <linux/types.h>
17 #include <linux/uaccess.h>
18 #include <linux/vfio.h>
20 #include "vfio_platform_private.h"
22 #define DRIVER_VERSION "0.10"
23 #define DRIVER_AUTHOR "Antonios Motakis <a.motakis@virtualopensystems.com>"
24 #define DRIVER_DESC "VFIO platform base module"
26 #define VFIO_PLATFORM_IS_ACPI(vdev) ((vdev)->acpihid != NULL)
28 static LIST_HEAD(reset_list
);
29 static DEFINE_MUTEX(driver_lock
);
31 static vfio_platform_reset_fn_t
vfio_platform_lookup_reset(const char *compat
,
32 struct module
**module
)
34 struct vfio_platform_reset_node
*iter
;
35 vfio_platform_reset_fn_t reset_fn
= NULL
;
37 mutex_lock(&driver_lock
);
38 list_for_each_entry(iter
, &reset_list
, link
) {
39 if (!strcmp(iter
->compat
, compat
) &&
40 try_module_get(iter
->owner
)) {
41 *module
= iter
->owner
;
42 reset_fn
= iter
->of_reset
;
46 mutex_unlock(&driver_lock
);
50 static int vfio_platform_acpi_probe(struct vfio_platform_device
*vdev
,
53 struct acpi_device
*adev
;
58 adev
= ACPI_COMPANION(dev
);
60 dev_err(dev
, "ACPI companion device not found for %s\n",
66 vdev
->acpihid
= acpi_device_hid(adev
);
68 return WARN_ON(!vdev
->acpihid
) ? -EINVAL
: 0;
71 static int vfio_platform_acpi_call_reset(struct vfio_platform_device
*vdev
,
72 const char **extra_dbg
)
75 struct device
*dev
= vdev
->device
;
76 acpi_handle handle
= ACPI_HANDLE(dev
);
79 acpi_ret
= acpi_evaluate_object(handle
, "_RST", NULL
, NULL
);
80 if (ACPI_FAILURE(acpi_ret
)) {
82 *extra_dbg
= acpi_format_exception(acpi_ret
);
92 static bool vfio_platform_acpi_has_reset(struct vfio_platform_device
*vdev
)
95 struct device
*dev
= vdev
->device
;
96 acpi_handle handle
= ACPI_HANDLE(dev
);
98 return acpi_has_method(handle
, "_RST");
104 static bool vfio_platform_has_reset(struct vfio_platform_device
*vdev
)
106 if (VFIO_PLATFORM_IS_ACPI(vdev
))
107 return vfio_platform_acpi_has_reset(vdev
);
109 return vdev
->of_reset
? true : false;
112 static int vfio_platform_get_reset(struct vfio_platform_device
*vdev
)
114 if (VFIO_PLATFORM_IS_ACPI(vdev
))
115 return vfio_platform_acpi_has_reset(vdev
) ? 0 : -ENOENT
;
117 vdev
->of_reset
= vfio_platform_lookup_reset(vdev
->compat
,
118 &vdev
->reset_module
);
119 if (!vdev
->of_reset
) {
120 request_module("vfio-reset:%s", vdev
->compat
);
121 vdev
->of_reset
= vfio_platform_lookup_reset(vdev
->compat
,
122 &vdev
->reset_module
);
125 return vdev
->of_reset
? 0 : -ENOENT
;
128 static void vfio_platform_put_reset(struct vfio_platform_device
*vdev
)
130 if (VFIO_PLATFORM_IS_ACPI(vdev
))
134 module_put(vdev
->reset_module
);
137 static int vfio_platform_regions_init(struct vfio_platform_device
*vdev
)
141 while (vdev
->get_resource(vdev
, cnt
))
144 vdev
->regions
= kcalloc(cnt
, sizeof(struct vfio_platform_region
),
149 for (i
= 0; i
< cnt
; i
++) {
150 struct resource
*res
=
151 vdev
->get_resource(vdev
, i
);
153 vdev
->regions
[i
].addr
= res
->start
;
154 vdev
->regions
[i
].size
= resource_size(res
);
155 vdev
->regions
[i
].flags
= 0;
157 switch (resource_type(res
)) {
159 vdev
->regions
[i
].type
= VFIO_PLATFORM_REGION_TYPE_MMIO
;
160 vdev
->regions
[i
].flags
|= VFIO_REGION_INFO_FLAG_READ
;
161 if (!(res
->flags
& IORESOURCE_READONLY
))
162 vdev
->regions
[i
].flags
|=
163 VFIO_REGION_INFO_FLAG_WRITE
;
166 * Only regions addressed with PAGE granularity may be
169 if (!(vdev
->regions
[i
].addr
& ~PAGE_MASK
) &&
170 !(vdev
->regions
[i
].size
& ~PAGE_MASK
))
171 vdev
->regions
[i
].flags
|=
172 VFIO_REGION_INFO_FLAG_MMAP
;
176 vdev
->regions
[i
].type
= VFIO_PLATFORM_REGION_TYPE_PIO
;
183 vdev
->num_regions
= cnt
;
187 kfree(vdev
->regions
);
191 static void vfio_platform_regions_cleanup(struct vfio_platform_device
*vdev
)
195 for (i
= 0; i
< vdev
->num_regions
; i
++)
196 iounmap(vdev
->regions
[i
].ioaddr
);
198 vdev
->num_regions
= 0;
199 kfree(vdev
->regions
);
202 static int vfio_platform_call_reset(struct vfio_platform_device
*vdev
,
203 const char **extra_dbg
)
205 if (VFIO_PLATFORM_IS_ACPI(vdev
)) {
206 dev_info(vdev
->device
, "reset\n");
207 return vfio_platform_acpi_call_reset(vdev
, extra_dbg
);
208 } else if (vdev
->of_reset
) {
209 dev_info(vdev
->device
, "reset\n");
210 return vdev
->of_reset(vdev
);
213 dev_warn(vdev
->device
, "no reset function found!\n");
217 void vfio_platform_close_device(struct vfio_device
*core_vdev
)
219 struct vfio_platform_device
*vdev
=
220 container_of(core_vdev
, struct vfio_platform_device
, vdev
);
221 const char *extra_dbg
= NULL
;
224 ret
= vfio_platform_call_reset(vdev
, &extra_dbg
);
225 if (WARN_ON(ret
&& vdev
->reset_required
)) {
228 "reset driver is required and reset call failed in release (%d) %s\n",
229 ret
, extra_dbg
? extra_dbg
: "");
231 pm_runtime_put(vdev
->device
);
232 vfio_platform_regions_cleanup(vdev
);
233 vfio_platform_irq_cleanup(vdev
);
235 EXPORT_SYMBOL_GPL(vfio_platform_close_device
);
237 int vfio_platform_open_device(struct vfio_device
*core_vdev
)
239 struct vfio_platform_device
*vdev
=
240 container_of(core_vdev
, struct vfio_platform_device
, vdev
);
241 const char *extra_dbg
= NULL
;
244 ret
= vfio_platform_regions_init(vdev
);
248 ret
= vfio_platform_irq_init(vdev
);
252 ret
= pm_runtime_get_sync(vdev
->device
);
256 ret
= vfio_platform_call_reset(vdev
, &extra_dbg
);
257 if (ret
&& vdev
->reset_required
) {
260 "reset driver is required and reset call failed in open (%d) %s\n",
261 ret
, extra_dbg
? extra_dbg
: "");
267 pm_runtime_put(vdev
->device
);
268 vfio_platform_irq_cleanup(vdev
);
270 vfio_platform_regions_cleanup(vdev
);
273 EXPORT_SYMBOL_GPL(vfio_platform_open_device
);
275 long vfio_platform_ioctl(struct vfio_device
*core_vdev
,
276 unsigned int cmd
, unsigned long arg
)
278 struct vfio_platform_device
*vdev
=
279 container_of(core_vdev
, struct vfio_platform_device
, vdev
);
283 if (cmd
== VFIO_DEVICE_GET_INFO
) {
284 struct vfio_device_info info
;
286 minsz
= offsetofend(struct vfio_device_info
, num_irqs
);
288 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
291 if (info
.argsz
< minsz
)
294 if (vfio_platform_has_reset(vdev
))
295 vdev
->flags
|= VFIO_DEVICE_FLAGS_RESET
;
296 info
.flags
= vdev
->flags
;
297 info
.num_regions
= vdev
->num_regions
;
298 info
.num_irqs
= vdev
->num_irqs
;
300 return copy_to_user((void __user
*)arg
, &info
, minsz
) ?
303 } else if (cmd
== VFIO_DEVICE_GET_REGION_INFO
) {
304 struct vfio_region_info info
;
306 minsz
= offsetofend(struct vfio_region_info
, offset
);
308 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
311 if (info
.argsz
< minsz
)
314 if (info
.index
>= vdev
->num_regions
)
317 /* map offset to the physical address */
318 info
.offset
= VFIO_PLATFORM_INDEX_TO_OFFSET(info
.index
);
319 info
.size
= vdev
->regions
[info
.index
].size
;
320 info
.flags
= vdev
->regions
[info
.index
].flags
;
322 return copy_to_user((void __user
*)arg
, &info
, minsz
) ?
325 } else if (cmd
== VFIO_DEVICE_GET_IRQ_INFO
) {
326 struct vfio_irq_info info
;
328 minsz
= offsetofend(struct vfio_irq_info
, count
);
330 if (copy_from_user(&info
, (void __user
*)arg
, minsz
))
333 if (info
.argsz
< minsz
)
336 if (info
.index
>= vdev
->num_irqs
)
339 info
.flags
= vdev
->irqs
[info
.index
].flags
;
340 info
.count
= vdev
->irqs
[info
.index
].count
;
342 return copy_to_user((void __user
*)arg
, &info
, minsz
) ?
345 } else if (cmd
== VFIO_DEVICE_SET_IRQS
) {
346 struct vfio_irq_set hdr
;
349 size_t data_size
= 0;
351 minsz
= offsetofend(struct vfio_irq_set
, count
);
353 if (copy_from_user(&hdr
, (void __user
*)arg
, minsz
))
356 ret
= vfio_set_irqs_validate_and_prepare(&hdr
, vdev
->num_irqs
,
357 vdev
->num_irqs
, &data_size
);
362 data
= memdup_user((void __user
*)(arg
+ minsz
),
365 return PTR_ERR(data
);
368 mutex_lock(&vdev
->igate
);
370 ret
= vfio_platform_set_irqs_ioctl(vdev
, hdr
.flags
, hdr
.index
,
371 hdr
.start
, hdr
.count
, data
);
372 mutex_unlock(&vdev
->igate
);
377 } else if (cmd
== VFIO_DEVICE_RESET
) {
378 return vfio_platform_call_reset(vdev
, NULL
);
383 EXPORT_SYMBOL_GPL(vfio_platform_ioctl
);
385 static ssize_t
vfio_platform_read_mmio(struct vfio_platform_region
*reg
,
386 char __user
*buf
, size_t count
,
389 unsigned int done
= 0;
393 ioremap(reg
->addr
, reg
->size
);
402 if (count
>= 4 && !(off
% 4)) {
405 val
= ioread32(reg
->ioaddr
+ off
);
406 if (copy_to_user(buf
, &val
, 4))
410 } else if (count
>= 2 && !(off
% 2)) {
413 val
= ioread16(reg
->ioaddr
+ off
);
414 if (copy_to_user(buf
, &val
, 2))
421 val
= ioread8(reg
->ioaddr
+ off
);
422 if (copy_to_user(buf
, &val
, 1))
440 ssize_t
vfio_platform_read(struct vfio_device
*core_vdev
,
441 char __user
*buf
, size_t count
, loff_t
*ppos
)
443 struct vfio_platform_device
*vdev
=
444 container_of(core_vdev
, struct vfio_platform_device
, vdev
);
445 unsigned int index
= VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos
);
446 loff_t off
= *ppos
& VFIO_PLATFORM_OFFSET_MASK
;
448 if (index
>= vdev
->num_regions
)
451 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_READ
))
454 if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_MMIO
)
455 return vfio_platform_read_mmio(&vdev
->regions
[index
],
457 else if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_PIO
)
458 return -EINVAL
; /* not implemented */
462 EXPORT_SYMBOL_GPL(vfio_platform_read
);
464 static ssize_t
vfio_platform_write_mmio(struct vfio_platform_region
*reg
,
465 const char __user
*buf
, size_t count
,
468 unsigned int done
= 0;
472 ioremap(reg
->addr
, reg
->size
);
481 if (count
>= 4 && !(off
% 4)) {
484 if (copy_from_user(&val
, buf
, 4))
486 iowrite32(val
, reg
->ioaddr
+ off
);
489 } else if (count
>= 2 && !(off
% 2)) {
492 if (copy_from_user(&val
, buf
, 2))
494 iowrite16(val
, reg
->ioaddr
+ off
);
500 if (copy_from_user(&val
, buf
, 1))
502 iowrite8(val
, reg
->ioaddr
+ off
);
518 ssize_t
vfio_platform_write(struct vfio_device
*core_vdev
, const char __user
*buf
,
519 size_t count
, loff_t
*ppos
)
521 struct vfio_platform_device
*vdev
=
522 container_of(core_vdev
, struct vfio_platform_device
, vdev
);
523 unsigned int index
= VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos
);
524 loff_t off
= *ppos
& VFIO_PLATFORM_OFFSET_MASK
;
526 if (index
>= vdev
->num_regions
)
529 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_WRITE
))
532 if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_MMIO
)
533 return vfio_platform_write_mmio(&vdev
->regions
[index
],
535 else if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_PIO
)
536 return -EINVAL
; /* not implemented */
540 EXPORT_SYMBOL_GPL(vfio_platform_write
);
542 static int vfio_platform_mmap_mmio(struct vfio_platform_region region
,
543 struct vm_area_struct
*vma
)
545 u64 req_len
, pgoff
, req_start
;
547 req_len
= vma
->vm_end
- vma
->vm_start
;
548 pgoff
= vma
->vm_pgoff
&
549 ((1U << (VFIO_PLATFORM_OFFSET_SHIFT
- PAGE_SHIFT
)) - 1);
550 req_start
= pgoff
<< PAGE_SHIFT
;
552 if (region
.size
< PAGE_SIZE
|| req_start
+ req_len
> region
.size
)
555 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
556 vma
->vm_pgoff
= (region
.addr
>> PAGE_SHIFT
) + pgoff
;
558 return remap_pfn_range(vma
, vma
->vm_start
, vma
->vm_pgoff
,
559 req_len
, vma
->vm_page_prot
);
562 int vfio_platform_mmap(struct vfio_device
*core_vdev
, struct vm_area_struct
*vma
)
564 struct vfio_platform_device
*vdev
=
565 container_of(core_vdev
, struct vfio_platform_device
, vdev
);
568 index
= vma
->vm_pgoff
>> (VFIO_PLATFORM_OFFSET_SHIFT
- PAGE_SHIFT
);
570 if (vma
->vm_end
< vma
->vm_start
)
572 if (!(vma
->vm_flags
& VM_SHARED
))
574 if (index
>= vdev
->num_regions
)
576 if (vma
->vm_start
& ~PAGE_MASK
)
578 if (vma
->vm_end
& ~PAGE_MASK
)
581 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_MMAP
))
584 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_READ
)
585 && (vma
->vm_flags
& VM_READ
))
588 if (!(vdev
->regions
[index
].flags
& VFIO_REGION_INFO_FLAG_WRITE
)
589 && (vma
->vm_flags
& VM_WRITE
))
592 vma
->vm_private_data
= vdev
;
594 if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_MMIO
)
595 return vfio_platform_mmap_mmio(vdev
->regions
[index
], vma
);
597 else if (vdev
->regions
[index
].type
& VFIO_PLATFORM_REGION_TYPE_PIO
)
598 return -EINVAL
; /* not implemented */
602 EXPORT_SYMBOL_GPL(vfio_platform_mmap
);
604 static int vfio_platform_of_probe(struct vfio_platform_device
*vdev
,
609 ret
= device_property_read_string(dev
, "compatible",
612 dev_err(dev
, "Cannot retrieve compat for %s\n", vdev
->name
);
/*
 * There can be two kernel build combinations. One build where
 * ACPI is not selected in Kconfig and another one with the ACPI Kconfig.
 *
 * In the first case, vfio_platform_acpi_probe will return since
 * acpi_disabled is 1. DT user will not see any kind of messages from
 * ACPI.
 *
 * In the second case, both DT and ACPI is compiled in but the system is
 * booting with any of these combinations.
 *
 * If the firmware is DT type, then acpi_disabled is 1. The ACPI probe routine
 * terminates immediately without any messages.
 *
 * If the firmware is ACPI type, then acpi_disabled is 0. All other checks are
 * valid checks. We cannot claim that this system is DT.
 */
634 int vfio_platform_init_common(struct vfio_platform_device
*vdev
)
637 struct device
*dev
= vdev
->vdev
.dev
;
639 ret
= vfio_platform_acpi_probe(vdev
, dev
);
641 ret
= vfio_platform_of_probe(vdev
, dev
);
647 mutex_init(&vdev
->igate
);
649 ret
= vfio_platform_get_reset(vdev
);
650 if (ret
&& vdev
->reset_required
) {
651 dev_err(dev
, "No reset function found for device %s\n",
658 EXPORT_SYMBOL_GPL(vfio_platform_init_common
);
/*
 * Common teardown for a vfio-platform device: release the reset mechanism
 * acquired in vfio_platform_init_common().
 */
void vfio_platform_release_common(struct vfio_platform_device *vdev)
{
	vfio_platform_put_reset(vdev);
}
EXPORT_SYMBOL_GPL(vfio_platform_release_common);
666 void __vfio_platform_register_reset(struct vfio_platform_reset_node
*node
)
668 mutex_lock(&driver_lock
);
669 list_add(&node
->link
, &reset_list
);
670 mutex_unlock(&driver_lock
);
672 EXPORT_SYMBOL_GPL(__vfio_platform_register_reset
);
674 void vfio_platform_unregister_reset(const char *compat
,
675 vfio_platform_reset_fn_t fn
)
677 struct vfio_platform_reset_node
*iter
, *temp
;
679 mutex_lock(&driver_lock
);
680 list_for_each_entry_safe(iter
, temp
, &reset_list
, link
) {
681 if (!strcmp(iter
->compat
, compat
) && (iter
->of_reset
== fn
)) {
682 list_del(&iter
->link
);
687 mutex_unlock(&driver_lock
);
690 EXPORT_SYMBOL_GPL(vfio_platform_unregister_reset
);
692 MODULE_VERSION(DRIVER_VERSION
);
693 MODULE_LICENSE("GPL v2");
694 MODULE_AUTHOR(DRIVER_AUTHOR
);
695 MODULE_DESCRIPTION(DRIVER_DESC
);