// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2013-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2017,2019-2020 NXP
 */

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/fsl/mc.h>
#include <linux/delay.h>
#include <linux/io-64-nonatomic-hi-lo.h>

#include "vfio_fsl_mc_private.h"

static struct fsl_mc_driver vfio_fsl_mc_driver;

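/*
 * Mirror the object's MC regions into vdev->regions at open time.  Only
 * regions that are page aligned in both address and size are marked
 * mmap-capable; DPRC regions are never mmap'd, presumably so that the
 * container's command portal is always mediated by the driver's
 * read/write path below.
 */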
static int vfio_fsl_mc_open_device(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int count = mc_dev->obj_desc.region_count;
	int i;

	vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),
				GFP_KERNEL_ACCOUNT);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct resource *res = &mc_dev->regions[i];
		int no_mmap = is_fsl_mc_bus_dprc(mc_dev);

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS;
		/*
		 * Only regions addressed with PAGE granularity may be
		 * MMAPed securely.
		 */
		if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) &&
		    !(vdev->regions[i].size & ~PAGE_MASK))
			vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
		vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
		if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
	}

	return 0;
}

static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int i;

	for (i = 0; i < mc_dev->obj_desc.region_count; i++)
		iounmap(vdev->regions[i].ioaddr);
	kfree(vdev->regions);
}

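/*
 * Reset takes one of two paths: a DPRC is reset through
 * dprc_reset_container() (non-recursively, leaving child objects alone),
 * while any other object is opened over the MC portal, reset and closed
 * again.
 */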
static int vfio_fsl_mc_reset_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int ret = 0;

	if (is_fsl_mc_bus_dprc(vdev->mc_dev)) {
		return dprc_reset_container(mc_dev->mc_io, 0,
					    mc_dev->mc_handle,
					    mc_dev->obj_desc.id,
					    DPRC_RESET_OPTION_NON_RECURSIVE);
	} else {
		u16 token;

		ret = fsl_mc_obj_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
				      mc_dev->obj_desc.type,
				      &token);
		if (ret)
			goto out;
		ret = fsl_mc_obj_reset(mc_dev->mc_io, 0, token);
		if (ret) {
			fsl_mc_obj_close(mc_dev->mc_io, 0, token);
			goto out;
		}
		ret = fsl_mc_obj_close(mc_dev->mc_io, 0, token);
	}
out:
	return ret;
}

static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
	struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
	int ret;

	vfio_fsl_mc_regions_cleanup(vdev);

	/* reset the device before cleaning up the interrupts */
	ret = vfio_fsl_mc_reset_device(vdev);

	if (ret)
		dev_warn(&mc_cont->dev,
			 "VFIO_FSL_MC: reset device has failed (%d)\n", ret);

	vfio_fsl_mc_irqs_cleanup(vdev);

	fsl_mc_cleanup_irq_pool(mc_cont);
}

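/*
 * Region offsets reported by VFIO_DEVICE_GET_REGION_INFO encode the region
 * index in the upper bits of the file offset (see the
 * VFIO_FSL_MC_INDEX_TO_OFFSET/VFIO_FSL_MC_OFFSET_TO_INDEX helpers in
 * vfio_fsl_mc_private.h); read, write and mmap below decode the index back
 * out of *ppos or vm_pgoff.
 */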
static long vfio_fsl_mc_ioctl(struct vfio_device *core_vdev,
			      unsigned int cmd, unsigned long arg)
{
	unsigned long minsz;
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = vdev->mc_dev;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_FSL_MC;

		if (is_fsl_mc_bus_dprc(mc_dev))
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = mc_dev->obj_desc.region_count;
		info.num_irqs = mc_dev->obj_desc.irq_count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= mc_dev->obj_desc.region_count)
			return -EINVAL;

		/* map offset to the physical address */
		info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;
		return 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);
		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= mc_dev->obj_desc.irq_count)
			return -EINVAL;

		info.flags = VFIO_IRQ_INFO_EVENTFD;
		info.count = 1;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;
		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, mc_dev->obj_desc.irq_count,
					mc_dev->obj_desc.irq_count, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);
		ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
						 hdr.index, hdr.start,
						 hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;
	}
	case VFIO_DEVICE_RESET:
	{
		return vfio_fsl_mc_reset_device(vdev);
	}
	default:
		return -ENOTTY;
	}
}

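/*
 * Reads are only served as a single 64-byte access at offset 0, i.e. one
 * whole MC command portal (an 8-byte header plus seven parameter words).
 * The region is ioremap()ed lazily on first access.
 */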
static ssize_t vfio_fsl_mc_read(struct vfio_device *core_vdev, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_region *region;
	u64 data[8];
	int i;

	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	region = &vdev->regions[index];

	if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (!region->ioaddr) {
		region->ioaddr = ioremap(region->addr, region->size);
		if (!region->ioaddr)
			return -ENOMEM;
	}

	if (count != 64 || off != 0)
		return -EINVAL;

	for (i = 7; i >= 0; i--)
		data[i] = readq(region->ioaddr + i * sizeof(uint64_t));

	if (copy_to_user(buf, data, 64))
		return -EFAULT;

	return count;
}

#define MC_CMD_COMPLETION_TIMEOUT_MS			5000
#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS	500

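/*
 * The MC consumes a command once its header becomes valid, so the seven
 * parameter words are written first with relaxed ordering and the header
 * last via writeq(), whose barrier makes the parameters visible to the
 * hardware before the header.  Completion is then polled for up to
 * MC_CMD_COMPLETION_TIMEOUT_MS.
 */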
static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
{
	int i;
	enum mc_cmd_status status;
	unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;

	/* Write the command parameters into the portal */
	for (i = 7; i >= 1; i--)
		writeq_relaxed(cmd_data[i], ioaddr + i * sizeof(uint64_t));

	/* Write the command header last */
	writeq(cmd_data[0], ioaddr);

	/* Wait for the response before returning to user space.
	 * This could be optimized in the future to prepare the response
	 * before returning to user space and avoid the read ioctl.
	 */
	for (;;) {
		u64 header;
		struct mc_cmd_header *resp_hdr;

		header = cpu_to_le64(readq_relaxed(ioaddr));

		resp_hdr = (struct mc_cmd_header *)&header;
		status = (enum mc_cmd_status)resp_hdr->status;
		if (status != MC_CMD_STATUS_READY)
			break;

		udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
		timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
		if (timeout_usecs == 0)
			return -ETIMEDOUT;
	}

	return 0;
}

static ssize_t vfio_fsl_mc_write(struct vfio_device *core_vdev,
				 const char __user *buf, size_t count,
				 loff_t *ppos)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_region *region;
	u64 data[8];
	int ret;

	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	region = &vdev->regions[index];

	if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (!region->ioaddr) {
		region->ioaddr = ioremap(region->addr, region->size);
		if (!region->ioaddr)
			return -ENOMEM;
	}

	if (count != 64 || off != 0)
		return -EINVAL;

	if (copy_from_user(&data, buf, 64))
		return -EFAULT;

	ret = vfio_fsl_mc_send_command(region->ioaddr, data);
	if (ret)
		return ret;

	return count;
}

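/*
 * vm_pgoff carries the region index above VFIO_FSL_MC_OFFSET_SHIFT (see
 * vfio_fsl_mc_private.h); the bits below it are the page offset into the
 * region.  Mappings are non-cacheable unless the MC region is both
 * cacheable and shareable.
 */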
static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
				 struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;
	u8 region_cacheable;

	pgoff = vma->vm_pgoff &
		((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || base + size > region.size)
		return -EINVAL;

	region_cacheable = (region.type & FSL_MC_REGION_CACHEABLE) &&
			   (region.type & FSL_MC_REGION_SHAREABLE);
	if (!region_cacheable)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}

static int vfio_fsl_mc_mmap(struct vfio_device *core_vdev,
			    struct vm_area_struct *vma)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = mc_dev;

	return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
}

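/*
 * Scanning a DPRC owned by this driver makes its child objects appear as
 * new devices on the fsl-mc bus.  The notifier sets driver_override on
 * each one so it can only bind to vfio-fsl-mc, and warns if another
 * driver binds anyway.
 */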
static const struct vfio_device_ops vfio_fsl_mc_ops;
static int vfio_fsl_mc_bus_notifier(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct vfio_fsl_mc_device *vdev = container_of(nb,
					struct vfio_fsl_mc_device, nb);
	struct device *dev = data;
	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
	struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);

	if (action == BUS_NOTIFY_ADD_DEVICE &&
	    vdev->mc_dev == mc_cont) {
		mc_dev->driver_override = kasprintf(GFP_KERNEL, "%s",
						    vfio_fsl_mc_ops.name);
		if (!mc_dev->driver_override)
			dev_warn(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s failed\n",
				 dev_name(&mc_cont->dev));
		else
			dev_info(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s\n",
				 dev_name(&mc_cont->dev));
	} else if (action == BUS_NOTIFY_BOUND_DRIVER &&
		   vdev->mc_dev == mc_cont) {
		struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);

		if (mc_drv && mc_drv != &vfio_fsl_mc_driver)
			dev_warn(dev, "VFIO_FSL_MC: Object %s bound to driver %s while DPRC bound to vfio-fsl-mc\n",
				 dev_name(dev), mc_drv->driver.name);
	}

	return 0;
}

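/*
 * A DPRC needs its own MC portal (allocated in dprc_setup()) and a bus
 * notifier to capture its child devices; non-DPRC objects simply borrow
 * the mc_io of their parent container.
 */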
static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int ret;

	/* Non-dprc devices share mc_io from parent */
	if (!is_fsl_mc_bus_dprc(mc_dev)) {
		struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);

		mc_dev->mc_io = mc_cont->mc_io;
		return 0;
	}

	vdev->nb.notifier_call = vfio_fsl_mc_bus_notifier;
	ret = bus_register_notifier(&fsl_mc_bus_type, &vdev->nb);
	if (ret)
		return ret;

	/* open DPRC, allocate a MC portal */
	ret = dprc_setup(mc_dev);
	if (ret) {
		dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
		goto out_nc_unreg;
	}
	return 0;

out_nc_unreg:
	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
	return ret;
}

static int vfio_fsl_mc_scan_container(struct fsl_mc_device *mc_dev)
{
	int ret;

	/* non dprc devices do not scan for other devices */
	if (!is_fsl_mc_bus_dprc(mc_dev))
		return 0;
	ret = dprc_scan_container(mc_dev, false);
	if (ret) {
		dev_err(&mc_dev->dev,
			"VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
		dprc_remove_devices(mc_dev, NULL, 0);
		return ret;
	}
	return 0;
}

static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;

	if (!is_fsl_mc_bus_dprc(mc_dev))
		return;

	dprc_cleanup(mc_dev);
	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
}

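/*
 * All objects inside one DPRC share a vfio device set, keyed by the
 * container's struct device, since resetting the container affects every
 * object within it.
 */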
static int vfio_fsl_mc_init_dev(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = to_fsl_mc_device(core_vdev->dev);
	int ret;

	vdev->mc_dev = mc_dev;
	mutex_init(&vdev->igate);

	if (is_fsl_mc_bus_dprc(mc_dev))
		ret = vfio_assign_device_set(core_vdev, &mc_dev->dev);
	else
		ret = vfio_assign_device_set(core_vdev, mc_dev->dev.parent);

	if (ret)
		return ret;

	/* device_set is released by vfio core if @init fails */
	return vfio_fsl_mc_init_device(vdev);
}

static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
{
	struct vfio_fsl_mc_device *vdev;
	struct device *dev = &mc_dev->dev;
	int ret;

	vdev = vfio_alloc_device(vfio_fsl_mc_device, vdev, dev,
				 &vfio_fsl_mc_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret) {
		dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
		goto out_put_vdev;
	}

	ret = vfio_fsl_mc_scan_container(mc_dev);
	if (ret)
		goto out_group_dev;
	dev_set_drvdata(dev, vdev);
	return 0;

out_group_dev:
	vfio_unregister_group_dev(&vdev->vdev);
out_put_vdev:
	vfio_put_device(&vdev->vdev);
	return ret;
}

static void vfio_fsl_mc_release_dev(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);

	vfio_fsl_uninit_device(vdev);
	mutex_destroy(&vdev->igate);
}

static void vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
{
	struct device *dev = &mc_dev->dev;
	struct vfio_fsl_mc_device *vdev = dev_get_drvdata(dev);

	vfio_unregister_group_dev(&vdev->vdev);
	dprc_remove_devices(mc_dev, NULL, 0);
	vfio_put_device(&vdev->vdev);
}

static const struct vfio_device_ops vfio_fsl_mc_ops = {
	.name		= "vfio-fsl-mc",
	.init		= vfio_fsl_mc_init_dev,
	.release	= vfio_fsl_mc_release_dev,
	.open_device	= vfio_fsl_mc_open_device,
	.close_device	= vfio_fsl_mc_close_device,
	.ioctl		= vfio_fsl_mc_ioctl,
	.read		= vfio_fsl_mc_read,
	.write		= vfio_fsl_mc_write,
	.mmap		= vfio_fsl_mc_mmap,
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
	.detach_ioas	= vfio_iommufd_physical_detach_ioas,
};

static struct fsl_mc_driver vfio_fsl_mc_driver = {
	.probe		= vfio_fsl_mc_probe,
	.remove		= vfio_fsl_mc_remove,
	.driver	= {
		.name	= "vfio-fsl-mc",
	},
	.driver_managed_dma = true,
};

module_fsl_mc_driver(vfio_fsl_mc_driver);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("VFIO for FSL-MC devices - User Level meta-driver");