/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 * Author: Alex Williamson <alex.williamson@redhat.com>
 */
#ifndef __VFIO_VFIO_H__
#define __VFIO_VFIO_H__

#include <linux/file.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/vfio.h>

struct iommufd_ctx;
struct iommu_group;
struct vfio_container;

struct vfio_device_file {
	struct vfio_device *device;
	struct vfio_group *group;

	u8 access_granted;
	u32 devid; /* only valid when iommufd is valid */
	spinlock_t kvm_ref_lock; /* protect kvm field */
	struct kvm *kvm;
	struct iommufd_ctx *iommufd; /* protected by struct vfio_device_set::lock */
};

void vfio_device_put_registration(struct vfio_device *device);
bool vfio_device_try_get_registration(struct vfio_device *device);
int vfio_df_open(struct vfio_device_file *df);
void vfio_df_close(struct vfio_device_file *df);
struct vfio_device_file *
vfio_allocate_device_file(struct vfio_device *device);

extern const struct file_operations vfio_device_fops;
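
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * group and cdev open paths are expected to pair these helpers roughly as
 * below; the exact call sites live in the VFIO core and hold additional
 * locks not shown here.
 *
 *	struct vfio_device_file *df;
 *
 *	df = vfio_allocate_device_file(device);
 *	if (IS_ERR(df))
 *		return PTR_ERR(df);
 *	...
 *	ret = vfio_df_open(df);
 *	...
 *	vfio_df_close(df);
 */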

#ifdef CONFIG_VFIO_NOIOMMU
extern bool vfio_noiommu __read_mostly;
#else
enum { vfio_noiommu = false };
#endif

enum vfio_group_type {
	/*
	 * Physical device with IOMMU backing.
	 */
	VFIO_IOMMU,

	/*
	 * Virtual device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI. The user of these devices must not be able to
	 * directly trigger unmediated DMA.
	 */
	VFIO_EMULATED_IOMMU,

	/*
	 * Physical device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI. Users can trigger unmediated DMA by the device,
	 * usage is highly dangerous, requires an explicit opt-in and will
	 * taint the kernel.
	 */
	VFIO_NO_IOMMU,
};
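
/*
 * Illustrative note (see vfio_main.c for the authoritative mapping): the
 * registration helpers exported in <linux/vfio.h> are what select a type,
 * roughly:
 *
 *	vfio_register_group_dev(device);		-> VFIO_IOMMU
 *	vfio_register_emulated_iommu_dev(device);	-> VFIO_EMULATED_IOMMU
 *
 * VFIO_NO_IOMMU groups are only created by the core itself when the
 * vfio_noiommu module parameter is enabled.
 */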

#if IS_ENABLED(CONFIG_VFIO_GROUP)
struct vfio_group {
	struct device			dev;
	struct cdev			cdev;
	/*
	 * When drivers is non-zero a driver is attached to the struct device
	 * that provided the iommu_group and thus the iommu_group is a valid
	 * pointer. When drivers is 0 the driver is being detached. Once users
	 * reaches 0 then the iommu_group is invalid.
	 */
	refcount_t			drivers;
	unsigned int			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct list_head		vfio_next;
#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
	struct list_head		container_next;
#endif
	enum vfio_group_type		type;
	struct mutex			group_lock;
	struct kvm			*kvm;
	struct file			*opened_file;
	struct blocking_notifier_head	notifier;
	struct iommufd_ctx		*iommufd;
	spinlock_t			kvm_ref_lock;
	unsigned int			cdev_device_open_cnt;
};

int vfio_device_block_group(struct vfio_device *device);
void vfio_device_unblock_group(struct vfio_device *device);
int vfio_device_set_group(struct vfio_device *device,
			  enum vfio_group_type type);
void vfio_device_remove_group(struct vfio_device *device);
void vfio_device_group_register(struct vfio_device *device);
void vfio_device_group_unregister(struct vfio_device *device);
int vfio_device_group_use_iommu(struct vfio_device *device);
void vfio_device_group_unuse_iommu(struct vfio_device *device);
void vfio_df_group_close(struct vfio_device_file *df);
struct vfio_group *vfio_group_from_file(struct file *file);
bool vfio_group_enforced_coherent(struct vfio_group *group);
void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
bool vfio_device_has_container(struct vfio_device *device);
int __init vfio_group_init(void);
void vfio_group_cleanup(void);

static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
{
	return IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
	       vdev->group->type == VFIO_NO_IOMMU;
}
#else
struct vfio_group;

static inline int vfio_device_block_group(struct vfio_device *device)
{
	return 0;
}

static inline void vfio_device_unblock_group(struct vfio_device *device)
{
}

static inline int vfio_device_set_group(struct vfio_device *device,
					enum vfio_group_type type)
{
	return 0;
}

static inline void vfio_device_remove_group(struct vfio_device *device)
{
}

static inline void vfio_device_group_register(struct vfio_device *device)
{
}

static inline void vfio_device_group_unregister(struct vfio_device *device)
{
}

static inline int vfio_device_group_use_iommu(struct vfio_device *device)
{
	return -EOPNOTSUPP;
}

static inline void vfio_device_group_unuse_iommu(struct vfio_device *device)
{
}

static inline void vfio_df_group_close(struct vfio_device_file *df)
{
}

static inline struct vfio_group *vfio_group_from_file(struct file *file)
{
	return NULL;
}

static inline bool vfio_group_enforced_coherent(struct vfio_group *group)
{
	return true;
}

static inline void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
{
}

static inline bool vfio_device_has_container(struct vfio_device *device)
{
	return false;
}

static inline int __init vfio_group_init(void)
{
	return 0;
}

static inline void vfio_group_cleanup(void)
{
}

static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
{
	return false;
}
#endif /* CONFIG_VFIO_GROUP */

#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
/**
 * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
 */
struct vfio_iommu_driver_ops {
	char		*name;
	struct module	*owner;
	void		*(*open)(unsigned long arg);
	void		(*release)(void *iommu_data);
	long		(*ioctl)(void *iommu_data, unsigned int cmd,
				 unsigned long arg);
	int		(*attach_group)(void *iommu_data,
					struct iommu_group *group,
					enum vfio_group_type);
	void		(*detach_group)(void *iommu_data,
					struct iommu_group *group);
	int		(*pin_pages)(void *iommu_data,
				     struct iommu_group *group,
				     dma_addr_t user_iova,
				     int npage, int prot,
				     struct page **pages);
	void		(*unpin_pages)(void *iommu_data,
				       dma_addr_t user_iova, int npage);
	void		(*register_device)(void *iommu_data,
					   struct vfio_device *vdev);
	void		(*unregister_device)(void *iommu_data,
					     struct vfio_device *vdev);
	int		(*dma_rw)(void *iommu_data, dma_addr_t user_iova,
				  void *data, size_t count, bool write);
	struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
						   struct iommu_group *group);
};

struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops);
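
/*
 * Registration sketch (illustrative; the "my_*" names are hypothetical,
 * modelled on the vfio_iommu_type1 backend):
 *
 *	static const struct vfio_iommu_driver_ops my_iommu_ops = {
 *		.name		= "my-iommu-backend",
 *		.owner		= THIS_MODULE,
 *		.open		= my_iommu_open,
 *		.release	= my_iommu_release,
 *		.ioctl		= my_iommu_ioctl,
 *		.attach_group	= my_iommu_attach_group,
 *		.detach_group	= my_iommu_detach_group,
 *	};
 *
 *	ret = vfio_register_iommu_driver(&my_iommu_ops);
 *	...
 *	vfio_unregister_iommu_driver(&my_iommu_ops);
 */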

struct vfio_container *vfio_container_from_file(struct file *filep);
int vfio_group_use_container(struct vfio_group *group);
void vfio_group_unuse_container(struct vfio_group *group);
int vfio_container_attach_group(struct vfio_container *container,
				struct vfio_group *group);
void vfio_group_detach_container(struct vfio_group *group);
void vfio_device_container_register(struct vfio_device *device);
void vfio_device_container_unregister(struct vfio_device *device);
int vfio_device_container_pin_pages(struct vfio_device *device,
				    dma_addr_t iova, int npage,
				    int prot, struct page **pages);
void vfio_device_container_unpin_pages(struct vfio_device *device,
				       dma_addr_t iova, int npage);
int vfio_device_container_dma_rw(struct vfio_device *device,
				 dma_addr_t iova, void *data,
				 size_t len, bool write);

int __init vfio_container_init(void);
void vfio_container_cleanup(void);
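
/*
 * Illustrative note: mediated drivers normally reach pin_pages/unpin_pages
 * through the wrappers exported in <linux/vfio.h>, which dispatch here when
 * the device was opened via the legacy group/container path, e.g.
 * (hypothetical driver state):
 *
 *	ret = vfio_pin_pages(&state->vdev, iova, 1,
 *			     IOMMU_READ | IOMMU_WRITE, &page);
 *	...
 *	vfio_unpin_pages(&state->vdev, iova, 1);
 */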
#else
static inline struct vfio_container *
vfio_container_from_file(struct file *filep)
{
	return NULL;
}

static inline int vfio_group_use_container(struct vfio_group *group)
{
	return -EOPNOTSUPP;
}

static inline void vfio_group_unuse_container(struct vfio_group *group)
{
}

static inline int vfio_container_attach_group(struct vfio_container *container,
					      struct vfio_group *group)
{
	return -EOPNOTSUPP;
}

static inline void vfio_group_detach_container(struct vfio_group *group)
{
}

static inline void vfio_device_container_register(struct vfio_device *device)
{
}

static inline void vfio_device_container_unregister(struct vfio_device *device)
{
}

static inline int vfio_device_container_pin_pages(struct vfio_device *device,
						  dma_addr_t iova, int npage,
						  int prot, struct page **pages)
{
	return -EOPNOTSUPP;
}

static inline void vfio_device_container_unpin_pages(struct vfio_device *device,
						     dma_addr_t iova, int npage)
{
}

static inline int vfio_device_container_dma_rw(struct vfio_device *device,
					       dma_addr_t iova, void *data,
					       size_t len, bool write)
{
	return -EOPNOTSUPP;
}

static inline int vfio_container_init(void)
{
	return 0;
}
static inline void vfio_container_cleanup(void)
{
}
#endif /* CONFIG_VFIO_CONTAINER */

#if IS_ENABLED(CONFIG_IOMMUFD)
bool vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
					 struct iommufd_ctx *ictx);
int vfio_df_iommufd_bind(struct vfio_device_file *df);
void vfio_df_iommufd_unbind(struct vfio_device_file *df);
int vfio_iommufd_compat_attach_ioas(struct vfio_device *device,
				    struct iommufd_ctx *ictx);
#else
static inline bool
vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
				    struct iommufd_ctx *ictx)
{
	return false;
}

static inline int vfio_df_iommufd_bind(struct vfio_device_file *fd)
{
	return -EOPNOTSUPP;
}

static inline void vfio_df_iommufd_unbind(struct vfio_device_file *df)
{
}

static inline int
vfio_iommufd_compat_attach_ioas(struct vfio_device *device,
				struct iommufd_ctx *ictx)
{
	return -EOPNOTSUPP;
}
#endif

int vfio_df_ioctl_attach_pt(struct vfio_device_file *df,
			    struct vfio_device_attach_iommufd_pt __user *arg);
int vfio_df_ioctl_detach_pt(struct vfio_device_file *df,
			    struct vfio_device_detach_iommufd_pt __user *arg);

#if IS_ENABLED(CONFIG_VFIO_DEVICE_CDEV)
void vfio_init_device_cdev(struct vfio_device *device);

static inline int vfio_device_add(struct vfio_device *device)
{
	/* cdev does not support noiommu device */
	if (vfio_device_is_noiommu(device))
		return device_add(&device->device);
	vfio_init_device_cdev(device);
	return cdev_device_add(&device->cdev, &device->device);
}

static inline void vfio_device_del(struct vfio_device *device)
{
	if (vfio_device_is_noiommu(device))
		device_del(&device->device);
	else
		cdev_device_del(&device->cdev, &device->device);
}

int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep);
long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
				struct vfio_device_bind_iommufd __user *arg);
void vfio_df_unbind_iommufd(struct vfio_device_file *df);
int vfio_cdev_init(struct class *device_class);
void vfio_cdev_cleanup(void);
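
/*
 * Sketch of the userspace flow that ends up in these handlers (see
 * include/uapi/linux/vfio.h for the authoritative ioctl layout; the device
 * path below is hypothetical):
 *
 *	fd = open("/dev/vfio/devices/vfio0", O_RDWR);
 *	bind.argsz = sizeof(bind);
 *	bind.iommufd = iommufd;                            // fd from /dev/iommu
 *	ioctl(fd, VFIO_DEVICE_BIND_IOMMUFD, &bind);        // vfio_df_ioctl_bind_iommufd()
 *	attach.argsz = sizeof(attach);
 *	attach.pt_id = ioas_id;
 *	ioctl(fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach); // vfio_df_ioctl_attach_pt()
 */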
#else
static inline void vfio_init_device_cdev(struct vfio_device *device)
{
}

static inline int vfio_device_add(struct vfio_device *device)
{
	return device_add(&device->device);
}

static inline void vfio_device_del(struct vfio_device *device)
{
	device_del(&device->device);
}

static inline int vfio_device_fops_cdev_open(struct inode *inode,
					     struct file *filep)
{
	return 0;
}

static inline long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
					      struct vfio_device_bind_iommufd __user *arg)
{
	return -EOPNOTSUPP;
}

static inline void vfio_df_unbind_iommufd(struct vfio_device_file *df)
{
}

static inline int vfio_cdev_init(struct class *device_class)
{
	return 0;
}

static inline void vfio_cdev_cleanup(void)
{
}
#endif /* CONFIG_VFIO_DEVICE_CDEV */

#if IS_ENABLED(CONFIG_VFIO_VIRQFD)
int __init vfio_virqfd_init(void);
void vfio_virqfd_exit(void);
#else
static inline int __init vfio_virqfd_init(void)
{
	return 0;
}
static inline void vfio_virqfd_exit(void)
{
}
#endif

#if IS_ENABLED(CONFIG_KVM)
void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm);
void vfio_device_put_kvm(struct vfio_device *device);
#else
static inline void vfio_device_get_kvm_safe(struct vfio_device *device,
					    struct kvm *kvm)
{
}

static inline void vfio_device_put_kvm(struct vfio_device *device)
{
}
#endif

#ifdef CONFIG_VFIO_DEBUGFS
void vfio_debugfs_create_root(void);
void vfio_debugfs_remove_root(void);

void vfio_device_debugfs_init(struct vfio_device *vdev);
void vfio_device_debugfs_exit(struct vfio_device *vdev);
#else
static inline void vfio_debugfs_create_root(void) { }
static inline void vfio_debugfs_remove_root(void) { }

static inline void vfio_device_debugfs_init(struct vfio_device *vdev) { }
static inline void vfio_device_debugfs_exit(struct vfio_device *vdev) { }
#endif /* CONFIG_VFIO_DEBUGFS */

#endif /* __VFIO_VFIO_H__ */