/*
 * VFIO-KVM bridge pseudo device
 *
 * Copyright (C) 2013 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio.h"

struct kvm_vfio_group {
        struct list_head node;
        struct vfio_group *vfio_group;
};

struct kvm_vfio {
        struct list_head group_list;
        struct mutex lock;
        bool noncoherent;
};

static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
{
        struct vfio_group *vfio_group;
        struct vfio_group *(*fn)(struct file *);

        fn = symbol_get(vfio_group_get_external_user);
        if (!fn)
                return ERR_PTR(-EINVAL);

        vfio_group = fn(filep);

        symbol_put(vfio_group_get_external_user);

        return vfio_group;
}

static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
{
        void (*fn)(struct vfio_group *);

        fn = symbol_get(vfio_group_put_external_user);
        if (!fn)
                return;

        fn(vfio_group);

        symbol_put(vfio_group_put_external_user);
}

static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group)
{
        long (*fn)(struct vfio_group *, unsigned long);
        long ret;

        fn = symbol_get(vfio_external_check_extension);
        if (!fn)
                return false;

        ret = fn(vfio_group, VFIO_DMA_CC_IOMMU);

        symbol_put(vfio_external_check_extension);

        return ret > 0;
}
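
/*
 * The three wrappers above use symbol_get()/symbol_put() so that kvm does
 * not take a hard link-time dependency on the vfio module: if vfio is not
 * loaded, symbol_get() simply returns NULL and the wrappers fail gracefully.
 * A minimal sketch of the same pattern for an arbitrary exported symbol
 * (my_exported_func is purely hypothetical, not part of this file):
 *
 *      int ret;
 *      int (*fn)(int arg);
 *
 *      fn = symbol_get(my_exported_func);      // also pins the owning module
 *      if (!fn)
 *              return -ENOENT;                 // provider not loaded
 *      ret = fn(arg);
 *      symbol_put(my_exported_func);           // drop the module reference
 */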

/*
 * Groups can use the same or different IOMMU domains.  If the same then
 * adding a new group may change the coherency of groups we've previously
 * been told about.  We don't want to care about any of that so we retest
 * each group and bail as soon as we find one that's noncoherent.  This
 * means we only ever [un]register_noncoherent_dma once for the whole device.
 */
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
        struct kvm_vfio *kv = dev->private;
        bool noncoherent = false;
        struct kvm_vfio_group *kvg;

        mutex_lock(&kv->lock);

        list_for_each_entry(kvg, &kv->group_list, node) {
                if (!kvm_vfio_group_is_coherent(kvg->vfio_group)) {
                        noncoherent = true;
                        break;
                }
        }

        if (noncoherent != kv->noncoherent) {
                kv->noncoherent = noncoherent;

                if (kv->noncoherent)
                        kvm_arch_register_noncoherent_dma(dev->kvm);
                else
                        kvm_arch_unregister_noncoherent_dma(dev->kvm);
        }

        mutex_unlock(&kv->lock);
}
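
/*
 * Worked example of the logic above (illustrative only): with two groups
 * tracked, both behind cache-coherent IOMMU domains, kv->noncoherent stays
 * false.  Adding a third group whose domain does not advertise
 * VFIO_DMA_CC_IOMMU makes the scan bail at that group, flips kv->noncoherent
 * to true and calls kvm_arch_register_noncoherent_dma() exactly once.
 * Removing that group later flips the flag back and triggers the matching
 * kvm_arch_unregister_noncoherent_dma() call.
 */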

static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
{
        struct kvm_vfio *kv = dev->private;
        struct vfio_group *vfio_group;
        struct kvm_vfio_group *kvg;
        int32_t __user *argp = (int32_t __user *)(unsigned long)arg;
        struct fd f;
        int32_t fd;
        int ret;

        switch (attr) {
        case KVM_DEV_VFIO_GROUP_ADD:
                if (get_user(fd, argp))
                        return -EFAULT;

                f = fdget(fd);
                if (!f.file)
                        return -EBADF;

                vfio_group = kvm_vfio_group_get_external_user(f.file);
                fdput(f);

                if (IS_ERR(vfio_group))
                        return PTR_ERR(vfio_group);

                mutex_lock(&kv->lock);

                list_for_each_entry(kvg, &kv->group_list, node) {
                        if (kvg->vfio_group == vfio_group) {
                                mutex_unlock(&kv->lock);
                                kvm_vfio_group_put_external_user(vfio_group);
                                return -EEXIST;
                        }
                }

                kvg = kzalloc(sizeof(*kvg), GFP_KERNEL);
                if (!kvg) {
                        mutex_unlock(&kv->lock);
                        kvm_vfio_group_put_external_user(vfio_group);
                        return -ENOMEM;
                }

                list_add_tail(&kvg->node, &kv->group_list);
                kvg->vfio_group = vfio_group;

                kvm_arch_start_assignment(dev->kvm);

                mutex_unlock(&kv->lock);

                kvm_vfio_update_coherency(dev);

                return 0;

        case KVM_DEV_VFIO_GROUP_DEL:
                if (get_user(fd, argp))
                        return -EFAULT;

                f = fdget(fd);
                if (!f.file)
                        return -EBADF;

                vfio_group = kvm_vfio_group_get_external_user(f.file);
                fdput(f);

                if (IS_ERR(vfio_group))
                        return PTR_ERR(vfio_group);

                ret = -ENOENT;

                mutex_lock(&kv->lock);

                list_for_each_entry(kvg, &kv->group_list, node) {
                        if (kvg->vfio_group != vfio_group)
                                continue;

                        list_del(&kvg->node);
                        kvm_vfio_group_put_external_user(kvg->vfio_group);
                        kfree(kvg);
                        ret = 0;
                        break;
                }

                kvm_arch_end_assignment(dev->kvm);

                mutex_unlock(&kv->lock);

                kvm_vfio_group_put_external_user(vfio_group);

                kvm_vfio_update_coherency(dev);

                return ret;
        }

        return -ENXIO;
}
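
/*
 * For reference, a rough sketch of how userspace reaches the ADD path above
 * (error handling omitted; vm_fd is the VM fd and group_fd an already-open
 * /dev/vfio/<group> fd, both assumed to exist):
 *
 *      struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *      struct kvm_device_attr attr = {
 *              .group = KVM_DEV_VFIO_GROUP,
 *              .attr  = KVM_DEV_VFIO_GROUP_ADD,
 *              .addr  = (__u64)(unsigned long)&group_fd,   // int32_t, read via get_user()
 *      };
 *
 *      ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);       // cd.fd becomes the device fd
 *      ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);   // ends up in kvm_vfio_set_group()
 *
 * KVM_DEV_VFIO_GROUP_DEL takes the same attribute layout.
 */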

static int kvm_vfio_set_attr(struct kvm_device *dev,
                             struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_VFIO_GROUP:
                return kvm_vfio_set_group(dev, attr->attr, attr->addr);
        }

        return -ENXIO;
}

static int kvm_vfio_has_attr(struct kvm_device *dev,
                             struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_VFIO_GROUP:
                switch (attr->attr) {
                case KVM_DEV_VFIO_GROUP_ADD:
                case KVM_DEV_VFIO_GROUP_DEL:
                        return 0;
                }

                break;
        }

        return -ENXIO;
}
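
/*
 * Userspace can probe for the attributes handled above before using them;
 * a minimal sketch, reusing the kvm_device_attr layout from the previous
 * example:
 *
 *      if (ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
 *              // KVM_DEV_VFIO_GROUP_ADD/DEL are supported on this kernel
 */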

static void kvm_vfio_destroy(struct kvm_device *dev)
{
        struct kvm_vfio *kv = dev->private;
        struct kvm_vfio_group *kvg, *tmp;

        list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
                kvm_vfio_group_put_external_user(kvg->vfio_group);
                list_del(&kvg->node);
                kfree(kvg);
                kvm_arch_end_assignment(dev->kvm);
        }

        kvm_vfio_update_coherency(dev);

        kfree(kv);
        kfree(dev);     /* alloc by kvm_ioctl_create_device, free by .destroy */
}

static int kvm_vfio_create(struct kvm_device *dev, u32 type);

static struct kvm_device_ops kvm_vfio_ops = {
        .name = "kvm-vfio",
        .create = kvm_vfio_create,
        .destroy = kvm_vfio_destroy,
        .set_attr = kvm_vfio_set_attr,
        .has_attr = kvm_vfio_has_attr,
};

static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
        struct kvm_device *tmp;
        struct kvm_vfio *kv;

        /* Only one VFIO "device" per VM */
        list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
                if (tmp->ops == &kvm_vfio_ops)
                        return -EBUSY;

        kv = kzalloc(sizeof(*kv), GFP_KERNEL);
        if (!kv)
                return -ENOMEM;

        INIT_LIST_HEAD(&kv->group_list);
        mutex_init(&kv->lock);

        dev->private = kv;

        return 0;
}

int kvm_vfio_ops_init(void)
{
        return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
}

void kvm_vfio_ops_exit(void)
{
        kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
}