drivers/vfio/fsl-mc/vfio_fsl_mc.c
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2013-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2017,2019-2020 NXP
 */

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/fsl/mc.h>
#include <linux/delay.h>
#include <linux/io-64-nonatomic-hi-lo.h>

#include "vfio_fsl_mc_private.h"

static struct fsl_mc_driver vfio_fsl_mc_driver;

static DEFINE_MUTEX(reflck_lock);

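/*
 * A DPRC allocates one reflck and every object inside that container
 * shares it (see vfio_fsl_mc_reflck_attach() below). reflck_lock
 * serialises allocation and release of these shared locks across devices.
 */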
static void vfio_fsl_mc_reflck_get(struct vfio_fsl_mc_reflck *reflck)
{
	kref_get(&reflck->kref);
}

static void vfio_fsl_mc_reflck_release(struct kref *kref)
{
	struct vfio_fsl_mc_reflck *reflck = container_of(kref,
						struct vfio_fsl_mc_reflck,
						kref);

	mutex_destroy(&reflck->lock);
	kfree(reflck);
	mutex_unlock(&reflck_lock);
}

static void vfio_fsl_mc_reflck_put(struct vfio_fsl_mc_reflck *reflck)
{
	kref_put_mutex(&reflck->kref, vfio_fsl_mc_reflck_release, &reflck_lock);
}

static struct vfio_fsl_mc_reflck *vfio_fsl_mc_reflck_alloc(void)
{
	struct vfio_fsl_mc_reflck *reflck;

	reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
	if (!reflck)
		return ERR_PTR(-ENOMEM);

	kref_init(&reflck->kref);
	mutex_init(&reflck->lock);

	return reflck;
}

static int vfio_fsl_mc_reflck_attach(struct vfio_fsl_mc_device *vdev)
{
	int ret = 0;

	mutex_lock(&reflck_lock);
	if (is_fsl_mc_bus_dprc(vdev->mc_dev)) {
		vdev->reflck = vfio_fsl_mc_reflck_alloc();
		ret = PTR_ERR_OR_ZERO(vdev->reflck);
	} else {
		struct device *mc_cont_dev = vdev->mc_dev->dev.parent;
		struct vfio_device *device;
		struct vfio_fsl_mc_device *cont_vdev;

		device = vfio_device_get_from_dev(mc_cont_dev);
		if (!device) {
			ret = -ENODEV;
			goto unlock;
		}

		cont_vdev = vfio_device_data(device);
		if (!cont_vdev || !cont_vdev->reflck) {
			vfio_device_put(device);
			ret = -ENODEV;
			goto unlock;
		}
		vfio_fsl_mc_reflck_get(cont_vdev->reflck);
		vdev->reflck = cont_vdev->reflck;
		vfio_device_put(device);
	}

unlock:
	mutex_unlock(&reflck_lock);
	return ret;
}

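/*
 * Translate the MC object's hardware resources into VFIO regions. Only
 * page-aligned regions of non-DPRC objects are advertised as mmappable;
 * everything else must go through the read/write path.
 */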
static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int count = mc_dev->obj_desc.region_count;
	int i;

	vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),
				GFP_KERNEL);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct resource *res = &mc_dev->regions[i];
		int no_mmap = is_fsl_mc_bus_dprc(mc_dev);

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS;
		/*
		 * Only regions addressed with PAGE granularity may be
		 * MMAPed securely.
		 */
		if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) &&
		    !(vdev->regions[i].size & ~PAGE_MASK))
			vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
		vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
		if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
	}

	return 0;
}

static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int i;

	for (i = 0; i < mc_dev->obj_desc.region_count; i++)
		iounmap(vdev->regions[i].ioaddr);
	kfree(vdev->regions);
}

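/*
 * The first open on a device sets up its region table; the matching
 * last release (below) tears it down and resets the container, so no
 * state leaks between consecutive userspace owners.
 */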
static int vfio_fsl_mc_open(void *device_data)
{
	struct vfio_fsl_mc_device *vdev = device_data;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&vdev->reflck->lock);
	if (!vdev->refcnt) {
		ret = vfio_fsl_mc_regions_init(vdev);
		if (ret)
			goto err_reg_init;
	}
	vdev->refcnt++;

	mutex_unlock(&vdev->reflck->lock);

	return 0;

err_reg_init:
	mutex_unlock(&vdev->reflck->lock);
	module_put(THIS_MODULE);
	return ret;
}

static void vfio_fsl_mc_release(void *device_data)
{
	struct vfio_fsl_mc_device *vdev = device_data;
	int ret;

	mutex_lock(&vdev->reflck->lock);

	if (!(--vdev->refcnt)) {
		struct fsl_mc_device *mc_dev = vdev->mc_dev;
		struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
		struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);

		vfio_fsl_mc_regions_cleanup(vdev);

		/* reset the device before cleaning up the interrupts */
		ret = dprc_reset_container(mc_cont->mc_io, 0,
					   mc_cont->mc_handle,
					   mc_cont->obj_desc.id,
					   DPRC_RESET_OPTION_NON_RECURSIVE);

		if (ret) {
			dev_warn(&mc_cont->dev, "VFIO_FSL_MC: reset device has failed (%d)\n",
				 ret);
			WARN_ON(1);
		}

		vfio_fsl_mc_irqs_cleanup(vdev);

		fsl_mc_cleanup_irq_pool(mc_cont);
	}

	mutex_unlock(&vdev->reflck->lock);

	module_put(THIS_MODULE);
}

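/*
 * The ioctls below follow the usual VFIO contract: userspace passes a
 * structure carrying argsz, the driver checks it against the minimum
 * size it understands (minsz) and copies back only those bytes.
 */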
static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd,
			      unsigned long arg)
{
	unsigned long minsz;
	struct vfio_fsl_mc_device *vdev = device_data;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_FSL_MC;

		if (is_fsl_mc_bus_dprc(mc_dev))
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = mc_dev->obj_desc.region_count;
		info.num_irqs = mc_dev->obj_desc.irq_count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= mc_dev->obj_desc.region_count)
			return -EINVAL;

		/* map offset to the physical address */
		info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;
		return 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);
		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= mc_dev->obj_desc.irq_count)
			return -EINVAL;

		info.flags = VFIO_IRQ_INFO_EVENTFD;
		info.count = 1;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;
		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, mc_dev->obj_desc.irq_count,
					mc_dev->obj_desc.irq_count, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);
		ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
						 hdr.index, hdr.start,
						 hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;
	}
	case VFIO_DEVICE_RESET:
	{
		int ret;
		struct fsl_mc_device *mc_dev = vdev->mc_dev;

		/* reset is supported only for the DPRC */
		if (!is_fsl_mc_bus_dprc(mc_dev))
			return -ENOTTY;

		ret = dprc_reset_container(mc_dev->mc_io, 0,
					   mc_dev->mc_handle,
					   mc_dev->obj_desc.id,
					   DPRC_RESET_OPTION_NON_RECURSIVE);
		return ret;
	}
	default:
		return -ENOTTY;
	}
}

static ssize_t vfio_fsl_mc_read(void *device_data, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct vfio_fsl_mc_device *vdev = device_data;
	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_region *region;
	u64 data[8];
	int i;

	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	region = &vdev->regions[index];

	if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (!region->ioaddr) {
		region->ioaddr = ioremap(region->addr, region->size);
		if (!region->ioaddr)
			return -ENOMEM;
	}

	if (count != 64 || off != 0)
		return -EINVAL;

	for (i = 7; i >= 0; i--)
		data[i] = readq(region->ioaddr + i * sizeof(uint64_t));

	if (copy_to_user(buf, data, 64))
		return -EFAULT;

	return count;
}

#define MC_CMD_COMPLETION_TIMEOUT_MS	5000
#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS	500

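/*
 * An MC command is exactly 64 bytes: one header word followed by seven
 * parameter words. The parameters are written first with relaxed
 * ordering; writing the header last (with the writeq() barrier) is what
 * submits the command, and completion is then polled in the header's
 * status field.
 */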
static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
{
	int i;
	enum mc_cmd_status status;
	unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;

	/* Write the command parameters into the portal */
	for (i = 7; i >= 1; i--)
		writeq_relaxed(cmd_data[i], ioaddr + i * sizeof(uint64_t));

	/* Write the command header last */
	writeq(cmd_data[0], ioaddr);

	/*
	 * Wait for the response before returning to user-space. This
	 * could be optimised in the future to prepare the response
	 * before returning to user-space, avoiding the read call.
	 */
	for (;;) {
		u64 header;
		struct mc_cmd_header *resp_hdr;

		header = cpu_to_le64(readq_relaxed(ioaddr));

		resp_hdr = (struct mc_cmd_header *)&header;
		status = (enum mc_cmd_status)resp_hdr->status;
		if (status != MC_CMD_STATUS_READY)
			break;

		udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
		timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
		if (timeout_usecs == 0)
			return -ETIMEDOUT;
	}

	return 0;
}

static ssize_t vfio_fsl_mc_write(void *device_data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct vfio_fsl_mc_device *vdev = device_data;
	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_region *region;
	u64 data[8];
	int ret;

	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	region = &vdev->regions[index];

	if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (!region->ioaddr) {
		region->ioaddr = ioremap(region->addr, region->size);
		if (!region->ioaddr)
			return -ENOMEM;
	}

	if (count != 64 || off != 0)
		return -EINVAL;

	if (copy_from_user(&data, buf, 64))
		return -EFAULT;

	ret = vfio_fsl_mc_send_command(region->ioaddr, data);
	if (ret)
		return ret;

	return count;
}

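/*
 * mmap exposes a region's MMIO directly to userspace. The mapping is
 * cacheable only when the firmware marks the region both cacheable and
 * shareable; anything else is mapped non-cached.
 */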
static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
				 struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;
	u8 region_cacheable;

	pgoff = vma->vm_pgoff &
		((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || base + size > region.size)
		return -EINVAL;

	region_cacheable = (region.type & FSL_MC_REGION_CACHEABLE) &&
			   (region.type & FSL_MC_REGION_SHAREABLE);
	if (!region_cacheable)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}

static int vfio_fsl_mc_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_fsl_mc_device *vdev = device_data;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = mc_dev;

	return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
}

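/*
 * Illustrative userspace sketch (not part of this driver): issuing an
 * MC command through the read/write handlers above. "fd" is assumed to
 * be a VFIO device fd obtained via VFIO_GROUP_GET_DEVICE_FD, and the
 * payload layout is the one vfio_fsl_mc_send_command() expects.
 *
 *	struct vfio_region_info ri = { .argsz = sizeof(ri), .index = 0 };
 *	u64 cmd[8];	// cmd[0] = header, cmd[1..7] = parameters
 *
 *	ioctl(fd, VFIO_DEVICE_GET_REGION_INFO, &ri);
 *	pwrite(fd, cmd, 64, ri.offset);	// submit; driver polls completion
 *	pread(fd, cmd, 64, ri.offset);	// fetch the 64-byte response
 */
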
static const struct vfio_device_ops vfio_fsl_mc_ops = {
	.name		= "vfio-fsl-mc",
	.open		= vfio_fsl_mc_open,
	.release	= vfio_fsl_mc_release,
	.ioctl		= vfio_fsl_mc_ioctl,
	.read		= vfio_fsl_mc_read,
	.write		= vfio_fsl_mc_write,
	.mmap		= vfio_fsl_mc_mmap,
};

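/*
 * Objects hot-plugged into a DPRC owned by vfio-fsl-mc must not bind to
 * host drivers, so the notifier sets driver_override as soon as they
 * appear on the bus, and warns if anything else binds regardless.
 */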
static int vfio_fsl_mc_bus_notifier(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct vfio_fsl_mc_device *vdev = container_of(nb,
					struct vfio_fsl_mc_device, nb);
	struct device *dev = data;
	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
	struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);

	if (action == BUS_NOTIFY_ADD_DEVICE &&
	    vdev->mc_dev == mc_cont) {
		mc_dev->driver_override = kasprintf(GFP_KERNEL, "%s",
						    vfio_fsl_mc_ops.name);
		if (!mc_dev->driver_override)
			dev_warn(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s failed\n",
				 dev_name(&mc_cont->dev));
		else
			dev_info(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s\n",
				 dev_name(&mc_cont->dev));
	} else if (action == BUS_NOTIFY_BOUND_DRIVER &&
		   vdev->mc_dev == mc_cont) {
		struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);

		if (mc_drv && mc_drv != &vfio_fsl_mc_driver)
			dev_warn(dev, "VFIO_FSL_MC: Object %s bound to driver %s while DPRC bound to vfio-fsl-mc\n",
				 dev_name(dev), mc_drv->driver.name);
	}

	return 0;
}

static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int ret;

	/* Non-dprc devices share the mc_io of their parent */
	if (!is_fsl_mc_bus_dprc(mc_dev)) {
		struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);

		mc_dev->mc_io = mc_cont->mc_io;
		return 0;
	}

	vdev->nb.notifier_call = vfio_fsl_mc_bus_notifier;
	ret = bus_register_notifier(&fsl_mc_bus_type, &vdev->nb);
	if (ret)
		return ret;

	/* open the DPRC and allocate an MC portal */
	ret = dprc_setup(mc_dev);
	if (ret) {
		dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
		goto out_nc_unreg;
	}

	ret = dprc_scan_container(mc_dev, false);
	if (ret) {
		dev_err(&mc_dev->dev, "VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
		goto out_dprc_cleanup;
	}

	return 0;

out_dprc_cleanup:
	dprc_remove_devices(mc_dev, NULL, 0);
	dprc_cleanup(mc_dev);
out_nc_unreg:
	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
	vdev->nb.notifier_call = NULL;

	return ret;
}

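/*
 * Probe order matters: the device joins its IOMMU group and the VFIO
 * group before the reflck and DPRC setup, and each error path unwinds
 * in exactly the reverse order.
 */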
static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
{
	struct iommu_group *group;
	struct vfio_fsl_mc_device *vdev;
	struct device *dev = &mc_dev->dev;
	int ret;

	group = vfio_iommu_group_get(dev);
	if (!group) {
		dev_err(dev, "VFIO_FSL_MC: No IOMMU group\n");
		return -EINVAL;
	}

	vdev = devm_kzalloc(dev, sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		ret = -ENOMEM;
		goto out_group_put;
	}

	vdev->mc_dev = mc_dev;

	ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
	if (ret) {
		dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
		goto out_group_put;
	}

	ret = vfio_fsl_mc_reflck_attach(vdev);
	if (ret)
		goto out_group_dev;

	ret = vfio_fsl_mc_init_device(vdev);
	if (ret)
		goto out_reflck;

	mutex_init(&vdev->igate);

	return 0;

out_reflck:
	vfio_fsl_mc_reflck_put(vdev->reflck);
out_group_dev:
	vfio_del_group_dev(dev);
out_group_put:
	vfio_iommu_group_put(group, dev);
	return ret;
}

static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
{
	struct vfio_fsl_mc_device *vdev;
	struct device *dev = &mc_dev->dev;

	vdev = vfio_del_group_dev(dev);
	if (!vdev)
		return -EINVAL;

	mutex_destroy(&vdev->igate);

	vfio_fsl_mc_reflck_put(vdev->reflck);

	if (is_fsl_mc_bus_dprc(mc_dev)) {
		dprc_remove_devices(mc_dev, NULL, 0);
		dprc_cleanup(mc_dev);
	}

	if (vdev->nb.notifier_call)
		bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);

	vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);

	return 0;
}

static struct fsl_mc_driver vfio_fsl_mc_driver = {
	.probe		= vfio_fsl_mc_probe,
	.remove		= vfio_fsl_mc_remove,
	.driver	= {
		.name	= "vfio-fsl-mc",
		.owner	= THIS_MODULE,
	},
};

static int __init vfio_fsl_mc_driver_init(void)
{
	return fsl_mc_driver_register(&vfio_fsl_mc_driver);
}

static void __exit vfio_fsl_mc_driver_exit(void)
{
	fsl_mc_driver_unregister(&vfio_fsl_mc_driver);
}

module_init(vfio_fsl_mc_driver_init);
module_exit(vfio_fsl_mc_driver_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("VFIO for FSL-MC devices - User Level meta-driver");