/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Intel Corporation
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#ifndef __LINUX_IOMMUFD_H
#define __LINUX_IOMMUFD_H

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/xarray.h>
struct device;
struct file;
struct iommu_group;
struct iommu_user_data;
struct iommu_user_data_array;
struct iommufd_access;
struct iommufd_ctx;
struct iommufd_device;
struct iommufd_viommu_ops;
struct page;
enum iommufd_object_type {
	IOMMUFD_OBJ_NONE,
	IOMMUFD_OBJ_ANY = IOMMUFD_OBJ_NONE,
	IOMMUFD_OBJ_DEVICE,
	IOMMUFD_OBJ_HWPT_PAGING,
	IOMMUFD_OBJ_HWPT_NESTED,
	IOMMUFD_OBJ_IOAS,
	IOMMUFD_OBJ_ACCESS,
	IOMMUFD_OBJ_FAULT,
	IOMMUFD_OBJ_VIOMMU,
	IOMMUFD_OBJ_VDEVICE,
#ifdef CONFIG_IOMMUFD_TEST
	IOMMUFD_OBJ_SELFTEST,
#endif
	IOMMUFD_OBJ_MAX,
};
/* Base struct for all objects with a userspace ID handle. */
struct iommufd_object {
	refcount_t shortterm_users;
	refcount_t users;
	enum iommufd_object_type type;
	unsigned int id;
};
struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
					   struct device *dev, u32 *id);
void iommufd_device_unbind(struct iommufd_device *idev);
int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id);
int iommufd_device_replace(struct iommufd_device *idev, u32 *pt_id);
void iommufd_device_detach(struct iommufd_device *idev);
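
/*
 * Example (illustrative sketch, not part of this header): a typical driver
 * binds its device to an iommufd context, then attaches it to an IOAS or
 * HWPT by ID. The ioas_id here is assumed to come from userspace via the
 * driver's own uAPI.
 *
 *	struct iommufd_device *idev;
 *	u32 dev_id, pt_id = ioas_id;
 *	int rc;
 *
 *	idev = iommufd_device_bind(ictx, dev, &dev_id);
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	rc = iommufd_device_attach(idev, &pt_id);
 *	if (rc) {
 *		iommufd_device_unbind(idev);
 *		return rc;
 *	}
 *	...
 *	iommufd_device_detach(idev);
 *	iommufd_device_unbind(idev);
 */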
struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev);
u32 iommufd_device_to_id(struct iommufd_device *idev);
struct iommufd_access_ops {
	u8 needs_pin_pages : 1;
	void (*unmap)(void *data, unsigned long iova, unsigned long length);
};
enum {
	IOMMUFD_ACCESS_RW_READ = 0,
	IOMMUFD_ACCESS_RW_WRITE = 1 << 0,
	/* Set if the caller is in a kthread; rw will then use kthread_use_mm() */
	IOMMUFD_ACCESS_RW_KTHREAD = 1 << 1,

	/* Only for use by selftest */
	__IOMMUFD_ACCESS_RW_SLOW_PATH = 1 << 2,
};
struct iommufd_access *
iommufd_access_create(struct iommufd_ctx *ictx,
		      const struct iommufd_access_ops *ops, void *data,
		      u32 *id);
void iommufd_access_destroy(struct iommufd_access *access);
int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id);
int iommufd_access_replace(struct iommufd_access *access, u32 ioas_id);
void iommufd_access_detach(struct iommufd_access *access);
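
/*
 * Example (illustrative sketch, not part of this header): an in-kernel user,
 * e.g. an emulated device model, creates an access with an unmap callback so
 * the core can revoke pinned ranges. The my_* names, my_data, ioas_id and
 * access_id are assumptions for illustration.
 *
 *	static void my_unmap(void *data, unsigned long iova,
 *			     unsigned long length)
 *	{
 *		... stop using and unpin pages in [iova, iova + length) ...
 *	}
 *
 *	static const struct iommufd_access_ops my_ops = {
 *		.needs_pin_pages = 1,
 *		.unmap = my_unmap,
 *	};
 *
 *	access = iommufd_access_create(ictx, &my_ops, my_data, &access_id);
 *	if (IS_ERR(access))
 *		return PTR_ERR(access);
 *	rc = iommufd_access_attach(access, ioas_id);
 */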
void iommufd_ctx_get(struct iommufd_ctx *ictx);
struct iommufd_viommu {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct iommu_device *iommu_dev;
	struct iommufd_hwpt_paging *hwpt;

	const struct iommufd_viommu_ops *ops;

	struct xarray vdevs;

	unsigned int type;
};
/**
 * struct iommufd_viommu_ops - vIOMMU specific operations
 * @destroy: Clean up all driver-specific parts of an iommufd_viommu. The memory
 *           of the vIOMMU will be freed by iommufd core after calling this op.
 * @alloc_domain_nested: Allocate an IOMMU_DOMAIN_NESTED on a vIOMMU that holds
 *                       a nesting parent domain (IOMMU_DOMAIN_PAGING). The
 *                       @user_data must be defined in
 *                       include/uapi/linux/iommufd.h. It must fully initialize
 *                       the new iommu_domain before returning. Upon failure,
 *                       ERR_PTR must be returned.
 * @cache_invalidate: Flush hardware cache used by a vIOMMU. It can be used for
 *                    any IOMMU hardware specific cache: TLB and device cache.
 *                    The @array passes in the cache invalidation requests, in
 *                    the form of a driver data structure. A driver must update
 *                    the array->entry_num to report the number of handled
 *                    requests. The data structure of the array entry must be
 *                    defined in include/uapi/linux/iommufd.h.
 */
struct iommufd_viommu_ops {
	void (*destroy)(struct iommufd_viommu *viommu);
	struct iommu_domain *(*alloc_domain_nested)(
		struct iommufd_viommu *viommu, u32 flags,
		const struct iommu_user_data *user_data);
	int (*cache_invalidate)(struct iommufd_viommu *viommu,
				struct iommu_user_data_array *array);
};
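
/*
 * Example (illustrative sketch, not part of this header): a driver embeds
 * struct iommufd_viommu as the first member of its own structure, implements
 * the ops, and allocates through the iommufd_viommu_alloc() helper defined at
 * the end of this header. The my_* names are assumptions for illustration.
 *
 *	struct my_viommu {
 *		struct iommufd_viommu core;
 *		... driver-private state ...
 *	};
 *
 *	static const struct iommufd_viommu_ops my_viommu_ops = {
 *		.destroy = my_viommu_destroy,
 *		.alloc_domain_nested = my_viommu_alloc_domain_nested,
 *		.cache_invalidate = my_viommu_cache_invalidate,
 *	};
 *
 *	struct my_viommu *mv = iommufd_viommu_alloc(ictx, struct my_viommu,
 *						    core, &my_viommu_ops);
 *	if (IS_ERR(mv))
 *		return PTR_ERR(mv);
 */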
#if IS_ENABLED(CONFIG_IOMMUFD)
struct iommufd_ctx *iommufd_ctx_from_file(struct file *file);
struct iommufd_ctx *iommufd_ctx_from_fd(int fd);
void iommufd_ctx_put(struct iommufd_ctx *ictx);
bool iommufd_ctx_has_group(struct iommufd_ctx *ictx, struct iommu_group *group);
int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
			     unsigned long length, struct page **out_pages,
			     unsigned int flags);
void iommufd_access_unpin_pages(struct iommufd_access *access,
				unsigned long iova, unsigned long length);
int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
		      void *data, size_t len, unsigned int flags);
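
/*
 * Example (illustrative sketch, not part of this header): once the access is
 * attached, data can be copied with iommufd_access_rw() or pages pinned for
 * direct use. The flags combine the IOMMUFD_ACCESS_RW_* values above; the
 * iova, len, buf, and pages variables are assumptions.
 *
 *	rc = iommufd_access_rw(access, iova, buf, len,
 *			       IOMMUFD_ACCESS_RW_WRITE);
 *
 *	rc = iommufd_access_pin_pages(access, iova, len, pages,
 *				      IOMMUFD_ACCESS_RW_WRITE);
 *	if (!rc) {
 *		...
 *		iommufd_access_unpin_pages(access, iova, len);
 *	}
 */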
int iommufd_vfio_compat_ioas_get_id(struct iommufd_ctx *ictx, u32 *out_ioas_id);
int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx);
int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx);
#else /* !CONFIG_IOMMUFD */
static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void iommufd_ctx_put(struct iommufd_ctx *ictx)
{
}

static inline int iommufd_access_pin_pages(struct iommufd_access *access,
					   unsigned long iova,
					   unsigned long length,
					   struct page **out_pages,
					   unsigned int flags)
{
	return -EOPNOTSUPP;
}

static inline void iommufd_access_unpin_pages(struct iommufd_access *access,
					      unsigned long iova,
					      unsigned long length)
{
}

static inline int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
				    void *data, size_t len, unsigned int flags)
{
	return -EOPNOTSUPP;
}

static inline int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx)
{
	return -EOPNOTSUPP;
}

static inline int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_IOMMUFD */
#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER_CORE)
struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
					     size_t size,
					     enum iommufd_object_type type);
struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
				       unsigned long vdev_id);
#else /* !CONFIG_IOMMUFD_DRIVER_CORE */
static inline struct iommufd_object *
_iommufd_object_alloc(struct iommufd_ctx *ictx, size_t size,
		      enum iommufd_object_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct device *
iommufd_viommu_find_dev(struct iommufd_viommu *viommu, unsigned long vdev_id)
{
	return NULL;
}
#endif /* CONFIG_IOMMUFD_DRIVER_CORE */
/*
 * Helpers for IOMMU driver to allocate driver structures that will be freed by
 * the iommufd core. The free op will be called prior to freeing the memory.
 */
#define iommufd_viommu_alloc(ictx, drv_struct, member, viommu_ops)            \
	({                                                                     \
		drv_struct *ret;                                               \
									       \
		static_assert(__same_type(struct iommufd_viommu,               \
					  ((drv_struct *)NULL)->member));      \
		static_assert(offsetof(drv_struct, member.obj) == 0);          \
		ret = (drv_struct *)_iommufd_object_alloc(                     \
			ictx, sizeof(drv_struct), IOMMUFD_OBJ_VIOMMU);         \
		if (!IS_ERR(ret))                                              \
			ret->member.ops = viommu_ops;                          \
		ret;                                                           \
	})
#endif /* __LINUX_IOMMUFD_H */