/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */
#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"
static struct vfio {
	struct class			*class;
	struct list_head		iommu_drivers_list;
	struct mutex			iommu_drivers_lock;
	struct list_head		group_list;
	struct idr			group_idr;
	struct mutex			group_lock;
	struct cdev			group_cdev;
	dev_t				group_devt;
	wait_queue_head_t		release_q;
} vfio;
struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};
struct vfio_container {
	struct kref			kref;
	struct list_head		group_list;
	struct rw_semaphore		group_lock;
	struct vfio_iommu_driver	*iommu_driver;
	void				*iommu_data;
};
struct vfio_unbound_dev {
	struct device			*dev;
	struct list_head		unbound_next;
};
struct vfio_group {
	struct kref			kref;
	int				minor;
	atomic_t			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct notifier_block		nb;
	struct list_head		vfio_next;
	struct list_head		container_next;
	struct list_head		unbound_list;
	struct mutex			unbound_lock;
	atomic_t			opened;
};
struct vfio_device {
	struct kref			kref;
	struct device			*dev;
	const struct vfio_device_ops	*ops;
	struct vfio_group		*group;
	struct list_head		group_next;
	void				*device_data;
};
/**
 * IOMMU driver registration
 */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver, *tmp;

	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return -ENOMEM;

	driver->ops = ops;

	mutex_lock(&vfio.iommu_drivers_lock);

	/* Check for duplicates */
	list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
		if (tmp->ops == ops) {
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return -EINVAL;
		}
	}

	list_add(&driver->vfio_next, &vfio.iommu_drivers_list);

	mutex_unlock(&vfio.iommu_drivers_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);
void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver;

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		if (driver->ops == ops) {
			list_del(&driver->vfio_next);
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return;
		}
	}
	mutex_unlock(&vfio.iommu_drivers_lock);
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
/**
 * Group minor allocation/free - both called with vfio.group_lock held
 */
static int vfio_alloc_group_minor(struct vfio_group *group)
{
	return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
}

static void vfio_free_group_minor(int minor)
{
	idr_remove(&vfio.group_idr, minor);
}
static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);
/**
 * Container objects - containers are created when /dev/vfio/vfio is
 * opened, but their lifecycle extends until the last user is done, so
 * it's freed via kref.  Must support container/group/device being
 * closed in any order.
 */
static void vfio_container_get(struct vfio_container *container)
{
	kref_get(&container->kref);
}

static void vfio_container_release(struct kref *kref)
{
	struct vfio_container *container;
	container = container_of(kref, struct vfio_container, kref);

	kfree(container);
}

static void vfio_container_put(struct vfio_container *container)
{
	kref_put(&container->kref, vfio_container_release);
}
static void vfio_group_unlock_and_free(struct vfio_group *group)
{
	mutex_unlock(&vfio.group_lock);
	/*
	 * Unregister outside of lock.  A spurious callback is harmless now
	 * that the group is no longer in vfio.group_list.
	 */
	iommu_group_unregister_notifier(group->iommu_group, &group->nb);
	kfree(group);
}
/**
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
	struct vfio_group *group, *tmp;
	struct device *dev;
	int ret, minor;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	kref_init(&group->kref);
	INIT_LIST_HEAD(&group->device_list);
	mutex_init(&group->device_lock);
	INIT_LIST_HEAD(&group->unbound_list);
	mutex_init(&group->unbound_lock);
	atomic_set(&group->container_users, 0);
	atomic_set(&group->opened, 0);
	group->iommu_group = iommu_group;

	group->nb.notifier_call = vfio_iommu_group_notifier;

	/*
	 * blocking notifiers acquire a rwsem around registering and hold
	 * it around callback.  Therefore, need to register outside of
	 * vfio.group_lock to avoid A-B/B-A contention.  Our callback won't
	 * do anything unless it can find the group in vfio.group_list, so
	 * no harm in registering early.
	 */
	ret = iommu_group_register_notifier(iommu_group, &group->nb);
	if (ret) {
		kfree(group);
		return ERR_PTR(ret);
	}

	mutex_lock(&vfio.group_lock);

	minor = vfio_alloc_group_minor(group);
	if (minor < 0) {
		vfio_group_unlock_and_free(group);
		return ERR_PTR(minor);
	}

	/* Did we race creating this group? */
	list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
		if (tmp->iommu_group == iommu_group) {
			vfio_group_get(tmp);
			vfio_free_group_minor(minor);
			vfio_group_unlock_and_free(group);
			return tmp;
		}
	}

	dev = device_create(vfio.class, NULL,
			    MKDEV(MAJOR(vfio.group_devt), minor),
			    group, "%d", iommu_group_id(iommu_group));
	if (IS_ERR(dev)) {
		vfio_free_group_minor(minor);
		vfio_group_unlock_and_free(group);
		return (struct vfio_group *)dev; /* ERR_PTR */
	}

	group->minor = minor;

	list_add(&group->vfio_next, &vfio.group_list);

	mutex_unlock(&vfio.group_lock);

	return group;
}
/* called with vfio.group_lock held */
static void vfio_group_release(struct kref *kref)
{
	struct vfio_group *group = container_of(kref, struct vfio_group, kref);
	struct vfio_unbound_dev *unbound, *tmp;
	struct iommu_group *iommu_group = group->iommu_group;

	WARN_ON(!list_empty(&group->device_list));

	list_for_each_entry_safe(unbound, tmp,
				 &group->unbound_list, unbound_next) {
		list_del(&unbound->unbound_next);
		kfree(unbound);
	}

	device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
	list_del(&group->vfio_next);
	vfio_free_group_minor(group->minor);
	vfio_group_unlock_and_free(group);
	iommu_group_put(iommu_group);
}
static void vfio_group_put(struct vfio_group *group)
{
	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}

/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
	kref_get(&group->kref);
}
/*
 * Not really a try as we will sleep for mutex, but we need to make
 * sure the group pointer is valid under lock and get a reference.
 */
static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
{
	struct vfio_group *target = group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group == target) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}
static struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group->iommu_group == iommu_group) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}
static struct vfio_group *vfio_group_get_from_minor(int minor)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	group = idr_find(&vfio.group_idr, minor);
	if (!group) {
		mutex_unlock(&vfio.group_lock);
		return NULL;
	}
	vfio_group_get(group);
	mutex_unlock(&vfio.group_lock);

	return group;
}
/**
 * Device objects - create, release, get, put, search
 */
static
struct vfio_device *vfio_group_create_device(struct vfio_group *group,
					     struct device *dev,
					     const struct vfio_device_ops *ops,
					     void *device_data)
{
	struct vfio_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	kref_init(&device->kref);
	device->dev = dev;
	device->group = group;
	device->ops = ops;
	device->device_data = device_data;
	dev_set_drvdata(dev, device);

	/* No need to get group_lock, caller has group reference */
	vfio_group_get(group);

	mutex_lock(&group->device_lock);
	list_add(&device->group_next, &group->device_list);
	mutex_unlock(&group->device_lock);

	return device;
}
static void vfio_device_release(struct kref *kref)
{
	struct vfio_device *device = container_of(kref,
						  struct vfio_device, kref);
	struct vfio_group *group = device->group;

	list_del(&device->group_next);
	mutex_unlock(&group->device_lock);

	dev_set_drvdata(device->dev, NULL);

	kfree(device);

	/* vfio_del_group_dev may be waiting for this device */
	wake_up(&vfio.release_q);
}
/* Device reference always implies a group reference */
void vfio_device_put(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_device_put);

static void vfio_device_get(struct vfio_device *device)
{
	vfio_group_get(device->group);
	kref_get(&device->kref);
}
static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
						 struct device *dev)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (device->dev == dev) {
			vfio_device_get(device);
			mutex_unlock(&group->device_lock);
			return device;
		}
	}
	mutex_unlock(&group->device_lock);
	return NULL;
}
/*
 * Whitelist some drivers that we know are safe (no dma) or just sit on
 * a device.  It's not always practical to leave a device within a group
 * driverless as it could get re-bound to something unsafe.
 */
static const char * const vfio_driver_whitelist[] = { "pci-stub", "pcieport" };

static bool vfio_whitelisted_driver(struct device_driver *drv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vfio_driver_whitelist); i++) {
		if (!strcmp(drv->name, vfio_driver_whitelist[i]))
			return true;
	}

	return false;
}
/*
 * A vfio group is viable for use by userspace if all devices are in
 * one of the following states:
 *  - driver-less
 *  - bound to a vfio driver
 *  - bound to a whitelisted driver
 *
 * We use two methods to determine whether a device is bound to a vfio
 * driver.  The first is to test whether the device exists in the vfio
 * group.  The second is to test if the device exists on the group
 * unbound_list, indicating it's in the middle of transitioning from
 * a vfio driver to driver-less.
 */
static int vfio_dev_viable(struct device *dev, void *data)
{
	struct vfio_group *group = data;
	struct vfio_device *device;
	struct device_driver *drv = ACCESS_ONCE(dev->driver);
	struct vfio_unbound_dev *unbound;
	int ret = -EINVAL;

	mutex_lock(&group->unbound_lock);
	list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
		if (dev == unbound->dev) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&group->unbound_lock);

	if (!ret || !drv || vfio_whitelisted_driver(drv))
		return 0;

	device = vfio_group_get_device(group, dev);
	if (device) {
		vfio_device_put(device);
		return 0;
	}

	return ret;
}
/**
 * Async device support
 */
static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	/* Do we already know about it?  We shouldn't */
	device = vfio_group_get_device(group, dev);
	if (WARN_ON_ONCE(device)) {
		vfio_device_put(device);
		return 0;
	}

	/* Nothing to do for idle groups */
	if (!atomic_read(&group->container_users))
		return 0;

	/* TODO Prevent device auto probing */
	WARN("Device %s added to live group %d!\n", dev_name(dev),
	     iommu_group_id(group->iommu_group));

	return 0;
}
static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
{
	/* We don't care what happens when the group isn't in use */
	if (!atomic_read(&group->container_users))
		return 0;

	return vfio_dev_viable(dev, group);
}
static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct vfio_group *group = container_of(nb, struct vfio_group, nb);
	struct device *dev = data;
	struct vfio_unbound_dev *unbound;

	/*
	 * Need to go through a group_lock lookup to get a reference or we
	 * risk racing a group being removed.  Ignore spurious notifies.
	 */
	group = vfio_group_try_get(group);
	if (!group)
		return NOTIFY_OK;

	switch (action) {
	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
		vfio_group_nb_add_dev(group, dev);
		break;
	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
		/*
		 * Nothing to do here.  If the device is in use, then the
		 * vfio sub-driver should block the remove callback until
		 * it is unused.  If the device is unused or attached to a
		 * stub driver, then it should be released and we don't
		 * care that it will be going away.
		 */
		break;
	case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
		pr_debug("%s: Device %s, group %d binding to driver\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group));
		break;
	case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
		pr_debug("%s: Device %s, group %d bound to driver %s\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group), dev->driver->name);
		BUG_ON(vfio_group_nb_verify(group, dev));
		break;
	case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
		pr_debug("%s: Device %s, group %d unbinding from driver %s\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group), dev->driver->name);
		break;
	case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
		pr_debug("%s: Device %s, group %d unbound from driver\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group));
		/*
		 * XXX An unbound device in a live group is ok, but we'd
		 * really like to avoid the above BUG_ON by preventing other
		 * drivers from binding to it.  Once that occurs, we have to
		 * stop the system to maintain isolation.  At a minimum, we'd
		 * want a toggle to disable driver auto probe for this device.
		 */

		mutex_lock(&group->unbound_lock);
		list_for_each_entry(unbound,
				    &group->unbound_list, unbound_next) {
			if (dev == unbound->dev) {
				list_del(&unbound->unbound_next);
				kfree(unbound);
				break;
			}
		}
		mutex_unlock(&group->unbound_lock);
		break;
	}

	vfio_group_put(group);
	return NOTIFY_OK;
}
int vfio_add_group_dev(struct device *dev,
		       const struct vfio_device_ops *ops, void *device_data)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	struct vfio_device *device;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return -EINVAL;

	group = vfio_group_get_from_iommu(iommu_group);
	if (!group) {
		group = vfio_create_group(iommu_group);
		if (IS_ERR(group)) {
			iommu_group_put(iommu_group);
			return PTR_ERR(group);
		}
	} else {
		/*
		 * A found vfio_group already holds a reference to the
		 * iommu_group.  A created vfio_group keeps the reference.
		 */
		iommu_group_put(iommu_group);
	}

	device = vfio_group_get_device(group, dev);
	if (device) {
		WARN(1, "Device %s already exists on group %d\n",
		     dev_name(dev), iommu_group_id(iommu_group));
		vfio_device_put(device);
		vfio_group_put(group);
		return -EBUSY;
	}

	device = vfio_group_create_device(group, dev, ops, device_data);
	if (IS_ERR(device)) {
		vfio_group_put(group);
		return PTR_ERR(device);
	}

	/*
	 * Drop all but the vfio_device reference.  The vfio_device holds
	 * a reference to the vfio_group, which holds a reference to the
	 * iommu_group.
	 */
	vfio_group_put(group);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);
/**
 * Get a reference to the vfio_device for a device that is known to
 * be bound to a vfio driver.  The driver implicitly holds a
 * vfio_device reference between vfio_add_group_dev and
 * vfio_del_group_dev.  We can therefore use drvdata to increment
 * that reference from the struct device.  This additional
 * reference must be released by calling vfio_device_put.
 */
struct vfio_device *vfio_device_get_from_dev(struct device *dev)
{
	struct vfio_device *device = dev_get_drvdata(dev);

	vfio_device_get(device);

	return device;
}
EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);

/*
 * Caller must hold a reference to the vfio_device
 */
void *vfio_device_data(struct vfio_device *device)
{
	return device->device_data;
}
EXPORT_SYMBOL_GPL(vfio_device_data);
/* Given a referenced group, check if it contains the device */
static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	device = vfio_group_get_device(group, dev);
	if (!device)
		return false;

	vfio_device_put(device);
	return true;
}
/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device... */
void *vfio_del_group_dev(struct device *dev)
{
	struct vfio_device *device = dev_get_drvdata(dev);
	struct vfio_group *group = device->group;
	void *device_data = device->device_data;
	struct vfio_unbound_dev *unbound;
	unsigned int i = 0;

	/*
	 * The group exists so long as we have a device reference.  Get
	 * a group reference and use it to scan for the device going away.
	 */
	vfio_group_get(group);

	/*
	 * When the device is removed from the group, the group suddenly
	 * becomes non-viable; the device has a driver (until the unbind
	 * completes), but it's not present in the group.  This is bad news
	 * for any external users that need to re-acquire a group reference
	 * in order to match and release their existing reference.  To
	 * solve this, we track such devices on the unbound_list to bridge
	 * the gap until they're fully unbound.
	 */
	unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
	if (unbound) {
		unbound->dev = dev;
		mutex_lock(&group->unbound_lock);
		list_add(&unbound->unbound_next, &group->unbound_list);
		mutex_unlock(&group->unbound_lock);
	}
	WARN_ON(!unbound);

	vfio_device_put(device);

	/*
	 * If the device is still present in the group after the above
	 * 'put', then it is in use and we need to request it from the
	 * bus driver.  The driver may in turn need to request the
	 * device from the user.  We send the request on an arbitrary
	 * interval with counter to allow the driver to take escalating
	 * measures to release the device if it has the ability to do so.
	 */
	do {
		device = vfio_group_get_device(group, dev);
		if (!device)
			break;

		if (device->ops->request)
			device->ops->request(device_data, i++);

		vfio_device_put(device);

	} while (wait_event_interruptible_timeout(vfio.release_q,
						  !vfio_dev_present(group, dev),
						  HZ * 10) <= 0);

	vfio_group_put(group);

	return device_data;
}
EXPORT_SYMBOL_GPL(vfio_del_group_dev);
/**
 * VFIO base fd, /dev/vfio/vfio
 */
static long vfio_ioctl_check_extension(struct vfio_container *container,
				       unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = 0;

	down_read(&container->group_lock);

	driver = container->iommu_driver;

	switch (arg) {
		/* No base extensions yet */
	default:
		/*
		 * If no driver is set, poll all registered drivers for
		 * extensions and return the first positive result.  If
		 * a driver is already set, further queries will be passed
		 * only to that driver.
		 */
		if (!driver) {
			mutex_lock(&vfio.iommu_drivers_lock);
			list_for_each_entry(driver, &vfio.iommu_drivers_list,
					    vfio_next) {
				if (!try_module_get(driver->ops->owner))
					continue;

				ret = driver->ops->ioctl(NULL,
							 VFIO_CHECK_EXTENSION,
							 arg);
				module_put(driver->ops->owner);
				if (ret > 0)
					break;
			}
			mutex_unlock(&vfio.iommu_drivers_lock);
		} else
			ret = driver->ops->ioctl(container->iommu_data,
						 VFIO_CHECK_EXTENSION, arg);
	}

	up_read(&container->group_lock);

	return ret;
}
/* hold write lock on container->group_lock */
static int __vfio_container_attach_groups(struct vfio_container *container,
					  struct vfio_iommu_driver *driver,
					  void *data)
{
	struct vfio_group *group;
	int ret = -ENODEV;

	list_for_each_entry(group, &container->group_list, container_next) {
		ret = driver->ops->attach_group(data, group->iommu_group);
		if (ret)
			goto unwind;
	}

	return ret;

unwind:
	list_for_each_entry_continue_reverse(group, &container->group_list,
					     container_next) {
		driver->ops->detach_group(data, group->iommu_group);
	}

	return ret;
}
static long vfio_ioctl_set_iommu(struct vfio_container *container,
				 unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = -ENODEV;

	down_write(&container->group_lock);

	/*
	 * The container is designed to be an unprivileged interface while
	 * the group can be assigned to specific users.  Therefore, only by
	 * adding a group to a container does the user get the privilege of
	 * enabling the iommu, which may allocate finite resources.  There
	 * is no unset_iommu, but by removing all the groups from a container,
	 * the container is deprivileged and returns to an unset state.
	 */
	if (list_empty(&container->group_list) || container->iommu_driver) {
		up_write(&container->group_lock);
		return -EINVAL;
	}

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		void *data;

		if (!try_module_get(driver->ops->owner))
			continue;

		/*
		 * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
		 * so test which iommu driver reported support for this
		 * extension and call open on them.  We also pass them the
		 * magic, allowing a single driver to support multiple
		 * interfaces if they'd like.
		 */
		if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
			module_put(driver->ops->owner);
			continue;
		}

		/* module reference holds the driver we're working on */
		mutex_unlock(&vfio.iommu_drivers_lock);

		data = driver->ops->open(arg);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			module_put(driver->ops->owner);
			goto skip_drivers_unlock;
		}

		ret = __vfio_container_attach_groups(container, driver, data);
		if (!ret) {
			container->iommu_driver = driver;
			container->iommu_data = data;
		} else {
			driver->ops->release(data);
			module_put(driver->ops->owner);
		}

		goto skip_drivers_unlock;
	}

	mutex_unlock(&vfio.iommu_drivers_lock);

skip_drivers_unlock:
	up_write(&container->group_lock);

	return ret;
}
static long vfio_fops_unl_ioctl(struct file *filep,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	void *data;
	long ret = -EINVAL;

	switch (cmd) {
	case VFIO_GET_API_VERSION:
		ret = VFIO_API_VERSION;
		break;
	case VFIO_CHECK_EXTENSION:
		ret = vfio_ioctl_check_extension(container, arg);
		break;
	case VFIO_SET_IOMMU:
		ret = vfio_ioctl_set_iommu(container, arg);
		break;
	default:
		down_read(&container->group_lock);

		driver = container->iommu_driver;
		data = container->iommu_data;

		if (driver) /* passthrough all unrecognized ioctls */
			ret = driver->ops->ioctl(data, cmd, arg);

		up_read(&container->group_lock);
	}

	return ret;
}
#ifdef CONFIG_COMPAT
static long vfio_fops_compat_ioctl(struct file *filep,
				   unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */
static int vfio_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_container *container;

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return -ENOMEM;

	INIT_LIST_HEAD(&container->group_list);
	init_rwsem(&container->group_lock);
	kref_init(&container->kref);

	filep->private_data = container;

	return 0;
}

static int vfio_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_container *container = filep->private_data;

	filep->private_data = NULL;

	vfio_container_put(container);

	return 0;
}
/*
 * Once an iommu driver is set, we optionally pass read/write/mmap
 * on to the driver, allowing management interfaces beyond ioctl.
 */
static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	down_read(&container->group_lock);

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->read))
		ret = driver->ops->read(container->iommu_data,
					buf, count, ppos);

	up_read(&container->group_lock);

	return ret;
}
static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	down_read(&container->group_lock);

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->write))
		ret = driver->ops->write(container->iommu_data,
					 buf, count, ppos);

	up_read(&container->group_lock);

	return ret;
}
static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	int ret = -EINVAL;

	down_read(&container->group_lock);

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->mmap))
		ret = driver->ops->mmap(container->iommu_data, vma);

	up_read(&container->group_lock);

	return ret;
}
static const struct file_operations vfio_fops = {
	.owner		= THIS_MODULE,
	.open		= vfio_fops_open,
	.release	= vfio_fops_release,
	.read		= vfio_fops_read,
	.write		= vfio_fops_write,
	.unlocked_ioctl	= vfio_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_fops_compat_ioctl,
#endif
	.mmap		= vfio_fops_mmap,
};
/**
 * VFIO Group fd, /dev/vfio/$GROUP
 */
static void __vfio_group_unset_container(struct vfio_group *group)
{
	struct vfio_container *container = group->container;
	struct vfio_iommu_driver *driver;

	down_write(&container->group_lock);

	driver = container->iommu_driver;
	if (driver)
		driver->ops->detach_group(container->iommu_data,
					  group->iommu_group);

	group->container = NULL;
	list_del(&group->container_next);

	/* Detaching the last group deprivileges a container, remove iommu */
	if (driver && list_empty(&container->group_list)) {
		driver->ops->release(container->iommu_data);
		module_put(driver->ops->owner);
		container->iommu_driver = NULL;
		container->iommu_data = NULL;
	}

	up_write(&container->group_lock);

	vfio_container_put(container);
}
/*
 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset.  Since the ioctl is called on
 * the group, we know that still exists, therefore the only valid
 * transition here is 1->0.
 */
static int vfio_group_unset_container(struct vfio_group *group)
{
	int users = atomic_cmpxchg(&group->container_users, 1, 0);

	if (!users)
		return -EINVAL;
	if (users != 1)
		return -EBUSY;

	__vfio_group_unset_container(group);

	return 0;
}

/*
 * When removing container users, anything that removes the last user
 * implicitly removes the group from the container.  That is, if the
 * group file descriptor is closed, as well as any device file descriptors,
 * the group is free.
 */
static void vfio_group_try_dissolve_container(struct vfio_group *group)
{
	if (0 == atomic_dec_if_positive(&group->container_users))
		__vfio_group_unset_container(group);
}
static int vfio_group_set_container(struct vfio_group *group, int container_fd)
{
	struct fd f;
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret = 0;

	if (atomic_read(&group->container_users))
		return -EINVAL;

	f = fdget(container_fd);
	if (!f.file)
		return -EBADF;

	/* Sanity check, is this really our fd? */
	if (f.file->f_op != &vfio_fops) {
		fdput(f);
		return -EINVAL;
	}

	container = f.file->private_data;
	WARN_ON(!container); /* fget ensures we don't race vfio_release */

	down_write(&container->group_lock);

	driver = container->iommu_driver;
	if (driver) {
		ret = driver->ops->attach_group(container->iommu_data,
						group->iommu_group);
		if (ret)
			goto unlock_out;
	}

	group->container = container;
	list_add(&group->container_next, &container->group_list);

	/* Get a reference on the container and mark a user within the group */
	vfio_container_get(container);
	atomic_inc(&group->container_users);

unlock_out:
	up_write(&container->group_lock);
	fdput(f);
	return ret;
}
static bool vfio_group_viable(struct vfio_group *group)
{
	return (iommu_group_for_each_dev(group->iommu_group,
					 group, vfio_dev_viable) == 0);
}

static const struct file_operations vfio_device_fops;
static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
{
	struct vfio_device *device;
	struct file *filep;
	int ret = -ENODEV;

	if (0 == atomic_read(&group->container_users) ||
	    !group->container->iommu_driver || !vfio_group_viable(group))
		return -EINVAL;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (strcmp(dev_name(device->dev), buf))
			continue;

		ret = device->ops->open(device->device_data);
		if (ret)
			break;
		/*
		 * We can't use anon_inode_getfd() because we need to modify
		 * the f_mode flags directly to allow more than just ioctls
		 */
		ret = get_unused_fd_flags(O_CLOEXEC);
		if (ret < 0) {
			device->ops->release(device->device_data);
			break;
		}

		filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
					   device, O_RDWR);
		if (IS_ERR(filep)) {
			put_unused_fd(ret);
			ret = PTR_ERR(filep);
			device->ops->release(device->device_data);
			break;
		}

		/*
		 * TODO: add an anon_inode interface to do this.
		 * Appears to be missing by lack of need rather than
		 * explicitly prevented.  Now there's need.
		 */
		filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

		vfio_device_get(device);
		atomic_inc(&group->container_users);

		fd_install(ret, filep);
		break;
	}
	mutex_unlock(&group->device_lock);

	return ret;
}
static long vfio_group_fops_unl_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vfio_group *group = filep->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case VFIO_GROUP_GET_STATUS:
	{
		struct vfio_group_status status;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_group_status, flags);

		if (copy_from_user(&status, (void __user *)arg, minsz))
			return -EFAULT;

		if (status.argsz < minsz)
			return -EINVAL;

		status.flags = 0;

		if (vfio_group_viable(group))
			status.flags |= VFIO_GROUP_FLAGS_VIABLE;

		if (group->container)
			status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;

		if (copy_to_user((void __user *)arg, &status, minsz))
			return -EFAULT;

		ret = 0;
		break;
	}
	case VFIO_GROUP_SET_CONTAINER:
	{
		int fd;

		if (get_user(fd, (int __user *)arg))
			return -EFAULT;

		if (fd < 0)
			return -EINVAL;

		ret = vfio_group_set_container(group, fd);
		break;
	}
	case VFIO_GROUP_UNSET_CONTAINER:
		ret = vfio_group_unset_container(group);
		break;
	case VFIO_GROUP_GET_DEVICE_FD:
	{
		char *buf;

		buf = strndup_user((const char __user *)arg, PAGE_SIZE);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		ret = vfio_group_get_device_fd(group, buf);
		kfree(buf);
		break;
	}
	}

	return ret;
}
#ifdef CONFIG_COMPAT
static long vfio_group_fops_compat_ioctl(struct file *filep,
					 unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_group_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */
static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_group *group;
	int opened;

	group = vfio_group_get_from_minor(iminor(inode));
	if (!group)
		return -ENODEV;

	/* Do we need multiple instances of the group open?  Seems not. */
	opened = atomic_cmpxchg(&group->opened, 0, 1);
	if (opened) {
		vfio_group_put(group);
		return -EBUSY;
	}

	/* Is something still in use from a previous open? */
	if (group->container) {
		atomic_dec(&group->opened);
		vfio_group_put(group);
		return -EBUSY;
	}

	filep->private_data = group;

	return 0;
}
static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	filep->private_data = NULL;

	vfio_group_try_dissolve_container(group);

	atomic_dec(&group->opened);

	vfio_group_put(group);

	return 0;
}
static const struct file_operations vfio_group_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vfio_group_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_group_fops_compat_ioctl,
#endif
	.open		= vfio_group_fops_open,
	.release	= vfio_group_fops_release,
};
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_device *device = filep->private_data;

	device->ops->release(device->device_data);

	vfio_group_try_dissolve_container(device->group);

	vfio_device_put(device);

	return 0;
}
static long vfio_device_fops_unl_ioctl(struct file *filep,
				       unsigned int cmd, unsigned long arg)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->ioctl))
		return -EINVAL;

	return device->ops->ioctl(device->device_data, cmd, arg);
}
static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->read))
		return -EINVAL;

	return device->ops->read(device->device_data, buf, count, ppos);
}
static ssize_t vfio_device_fops_write(struct file *filep,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->write))
		return -EINVAL;

	return device->ops->write(device->device_data, buf, count, ppos);
}
static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->mmap))
		return -EINVAL;

	return device->ops->mmap(device->device_data, vma);
}
#ifdef CONFIG_COMPAT
static long vfio_device_fops_compat_ioctl(struct file *filep,
					  unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_device_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */
static const struct file_operations vfio_device_fops = {
	.owner		= THIS_MODULE,
	.release	= vfio_device_fops_release,
	.read		= vfio_device_fops_read,
	.write		= vfio_device_fops_write,
	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_device_fops_compat_ioctl,
#endif
	.mmap		= vfio_device_fops_mmap,
};
/**
 * External user API, exported by symbols to be linked dynamically.
 *
 * The protocol includes:
 *  1. do normal VFIO init operation:
 *	- opening a new container;
 *	- attaching group(s) to it;
 *	- setting an IOMMU driver for a container.
 * When IOMMU is set for a container, all groups in it are
 * considered ready to use by an external user.
 *
 * 2. User space passes a group fd to an external user.
 * The external user calls vfio_group_get_external_user()
 * on the fd to verify that:
 *	- the group is initialized;
 *	- IOMMU is set for it.
 * If both checks passed, vfio_group_get_external_user()
 * increments the container user counter to prevent
 * the VFIO group from disposal before KVM exits.
 *
 * 3. The external user calls vfio_external_user_iommu_id()
 * to know an IOMMU ID.
 *
 * 4. When the external KVM finishes, it calls
 * vfio_group_put_external_user() to release the VFIO group.
 * This call decrements the container user counter.
 */
struct vfio_group *vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	if (filep->f_op != &vfio_group_fops)
		return ERR_PTR(-EINVAL);

	if (!atomic_inc_not_zero(&group->container_users))
		return ERR_PTR(-EINVAL);

	if (!group->container->iommu_driver ||
	    !vfio_group_viable(group)) {
		atomic_dec(&group->container_users);
		return ERR_PTR(-EINVAL);
	}

	vfio_group_get(group);

	return group;
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user);

void vfio_group_put_external_user(struct vfio_group *group)
{
	vfio_group_put(group);
	vfio_group_try_dissolve_container(group);
}
EXPORT_SYMBOL_GPL(vfio_group_put_external_user);

int vfio_external_user_iommu_id(struct vfio_group *group)
{
	return iommu_group_id(group->iommu_group);
}
EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);

long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
{
	return vfio_ioctl_check_extension(group->container, arg);
}
EXPORT_SYMBOL_GPL(vfio_external_check_extension);
/**
 * Module/class support
 */
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}

static struct miscdevice vfio_dev = {
	.minor = VFIO_MINOR,
	.name = "vfio",
	.fops = &vfio_fops,
	.nodename = "vfio/vfio",
	.mode = S_IRUGO | S_IWUGO,
};
static int __init vfio_init(void)
{
	int ret;

	idr_init(&vfio.group_idr);
	mutex_init(&vfio.group_lock);
	mutex_init(&vfio.iommu_drivers_lock);
	INIT_LIST_HEAD(&vfio.group_list);
	INIT_LIST_HEAD(&vfio.iommu_drivers_list);
	init_waitqueue_head(&vfio.release_q);

	ret = misc_register(&vfio_dev);
	if (ret) {
		pr_err("vfio: misc device register failed\n");
		return ret;
	}

	/* /dev/vfio/$GROUP */
	vfio.class = class_create(THIS_MODULE, "vfio");
	if (IS_ERR(vfio.class)) {
		ret = PTR_ERR(vfio.class);
		goto err_class;
	}

	vfio.class->devnode = vfio_devnode;

	ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK, "vfio");
	if (ret)
		goto err_alloc_chrdev;

	cdev_init(&vfio.group_cdev, &vfio_group_fops);
	ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK);
	if (ret)
		goto err_cdev_add;

	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");

	/*
	 * Attempt to load known iommu-drivers.  This gives us a working
	 * environment without the user needing to explicitly load iommu
	 * drivers.
	 */
	request_module_nowait("vfio_iommu_type1");
	request_module_nowait("vfio_iommu_spapr_tce");

	return 0;

err_cdev_add:
	unregister_chrdev_region(vfio.group_devt, MINORMASK);
err_alloc_chrdev:
	class_destroy(vfio.class);
	vfio.class = NULL;
err_class:
	misc_deregister(&vfio_dev);
	return ret;
}
static void __exit vfio_cleanup(void)
{
	WARN_ON(!list_empty(&vfio.group_list));

	idr_destroy(&vfio.group_idr);
	cdev_del(&vfio.group_cdev);
	unregister_chrdev_region(vfio.group_devt, MINORMASK);
	class_destroy(vfio.class);
	misc_deregister(&vfio_dev);
}
module_init(vfio_init);
module_exit(vfio_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS_MISCDEV(VFIO_MINOR);
MODULE_ALIAS("devname:vfio/vfio");