// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <uapi/linux/iommufd.h>

#include "io_pagetable.h"
void iommufd_ioas_destroy(struct iommufd_object *obj)
{
	struct iommufd_ioas *ioas = container_of(obj, struct iommufd_ioas, obj);
	int rc;

	rc = iopt_unmap_all(&ioas->iopt, NULL);
	WARN_ON(rc && rc != -ENOENT);
	iopt_destroy_table(&ioas->iopt);
	mutex_destroy(&ioas->mutex);
}
struct iommufd_ioas *iommufd_ioas_alloc(struct iommufd_ctx *ictx)
{
	struct iommufd_ioas *ioas;

	ioas = iommufd_object_alloc(ictx, ioas, IOMMUFD_OBJ_IOAS);
	if (IS_ERR(ioas))
		return ioas;

	iopt_init_table(&ioas->iopt);
	INIT_LIST_HEAD(&ioas->hwpt_list);
	mutex_init(&ioas->mutex);
	return ioas;
}
int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_alloc *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	int rc;

	if (cmd->flags)
		return -EOPNOTSUPP;

	ioas = iommufd_ioas_alloc(ucmd->ictx);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	cmd->out_ioas_id = ioas->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_table;
	iommufd_object_finalize(ucmd->ictx, &ioas->obj);
	return 0;

out_table:
	iommufd_object_abort_and_destroy(ucmd->ictx, &ioas->obj);
	return rc;
}
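
/*
 * Illustrative userspace sketch (not part of this file): allocating an IOAS
 * with the IOMMU_IOAS_ALLOC ioctl on /dev/iommu. Struct layout and ioctl name
 * follow uapi/linux/iommufd.h as understood here; verify against your
 * installed headers.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/iommufd.h>
 *
 *	int iommufd = open("/dev/iommu", O_RDWR);
 *	struct iommu_ioas_alloc alloc_cmd = {
 *		.size = sizeof(alloc_cmd),	// required by the ucmd layer
 *		.flags = 0,			// must be zero today
 *	};
 *	if (ioctl(iommufd, IOMMU_IOAS_ALLOC, &alloc_cmd) == 0)
 *		printf("new IOAS id %u\n", alloc_cmd.out_ioas_id);
 */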
int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd)
{
	struct iommu_iova_range __user *ranges;
	struct iommu_ioas_iova_ranges *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	struct interval_tree_span_iter span;
	u32 max_iovas;
	int rc;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	down_read(&ioas->iopt.iova_rwsem);
	max_iovas = cmd->num_iovas;
	ranges = u64_to_user_ptr(cmd->allowed_iovas);
	cmd->num_iovas = 0;
	cmd->out_iova_alignment = ioas->iopt.iova_alignment;
	/*
	 * Report every hole between reserved regions as a usable IOVA range,
	 * copying out at most the number of entries userspace provided room
	 * for while still counting the total.
	 */
	interval_tree_for_each_span(&span, &ioas->iopt.reserved_itree, 0,
				    ULONG_MAX) {
		if (!span.is_hole)
			continue;
		if (cmd->num_iovas < max_iovas) {
			struct iommu_iova_range elm = {
				.start = span.start_hole,
				.last = span.last_hole,
			};

			if (copy_to_user(&ranges[cmd->num_iovas], &elm,
					 sizeof(elm))) {
				rc = -EFAULT;
				goto out_put;
			}
		}
		cmd->num_iovas++;
	}
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_put;
	if (cmd->num_iovas > max_iovas)
		rc = -EMSGSIZE;
out_put:
	up_read(&ioas->iopt.iova_rwsem);
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}
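
/*
 * Illustrative userspace sketch (not part of this file), continuing from the
 * IOMMU_IOAS_ALLOC example above: the usual two-call pattern for
 * IOMMU_IOAS_IOVA_RANGES. The first call with num_iovas == 0 fails with
 * EMSGSIZE but still reports how many ranges exist; the second call supplies
 * a large enough buffer. Field names follow uapi/linux/iommufd.h as
 * understood here; check your headers.
 *
 *	struct iommu_ioas_iova_ranges ranges_cmd = {
 *		.size = sizeof(ranges_cmd),
 *		.ioas_id = alloc_cmd.out_ioas_id,
 *		.num_iovas = 0,
 *	};
 *	ioctl(iommufd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd); // expect EMSGSIZE
 *
 *	struct iommu_iova_range *ranges =
 *		calloc(ranges_cmd.num_iovas, sizeof(*ranges));
 *	ranges_cmd.allowed_iovas = (uintptr_t)ranges;
 *	if (ioctl(iommufd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd) == 0)
 *		; // ranges[0..num_iovas-1] now hold usable [start, last] IOVAs
 */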
static int iommufd_ioas_load_iovas(struct rb_root_cached *itree,
				   struct iommu_iova_range __user *ranges,
				   u32 num)
{
	u32 i;

	for (i = 0; i != num; i++) {
		struct iommu_iova_range range;
		struct iopt_allowed *allowed;

		if (copy_from_user(&range, ranges + i, sizeof(range)))
			return -EFAULT;

		if (range.start >= range.last)
			return -EINVAL;

		/* Reject ranges that overlap one already loaded */
		if (interval_tree_iter_first(itree, range.start, range.last))
			return -EINVAL;

		allowed = kzalloc(sizeof(*allowed), GFP_KERNEL_ACCOUNT);
		if (!allowed)
			return -ENOMEM;
		allowed->node.start = range.start;
		allowed->node.last = range.last;

		interval_tree_insert(&allowed->node, itree);
	}
	return 0;
}
int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_allow_iovas *cmd = ucmd->cmd;
	struct rb_root_cached allowed_iova = RB_ROOT_CACHED;
	struct interval_tree_node *node;
	struct iommufd_ioas *ioas;
	struct io_pagetable *iopt;
	int rc = 0;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);
	iopt = &ioas->iopt;

	rc = iommufd_ioas_load_iovas(&allowed_iova,
				     u64_to_user_ptr(cmd->allowed_iovas),
				     cmd->num_iovas);
	if (rc)
		goto out_free;

	/*
	 * We want the allowed tree update to be atomic, so we have to keep the
	 * original nodes around, and keep track of the new nodes as we allocate
	 * memory for them. The simplest solution is to have a new/old tree and
	 * then swap new for old. On success we free the old tree, on failure we
	 * free the new tree.
	 */
	rc = iopt_set_allow_iova(iopt, &allowed_iova);
out_free:
	while ((node = interval_tree_iter_first(&allowed_iova, 0, ULONG_MAX))) {
		interval_tree_remove(node, &allowed_iova);
		kfree(container_of(node, struct iopt_allowed, node));
	}
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}
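
/*
 * Illustrative userspace sketch (not part of this file), continuing the
 * examples above: restricting future IOVA allocations with
 * IOMMU_IOAS_ALLOW_IOVAS. Ranges are inclusive [start, last], must satisfy
 * start < last, and must not overlap. Field names follow
 * uapi/linux/iommufd.h as understood here.
 *
 *	struct iommu_iova_range allow[] = {
 *		{ .start = 0x100000, .last = 0x1fffff },	// example range
 *	};
 *	struct iommu_ioas_allow_iovas allow_cmd = {
 *		.size = sizeof(allow_cmd),
 *		.ioas_id = alloc_cmd.out_ioas_id,
 *		.num_iovas = 1,
 *		.allowed_iovas = (uintptr_t)allow,
 *	};
 *	ioctl(iommufd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd);
 */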
static int conv_iommu_prot(u32 map_flags)
{
	/*
	 * We provide no manual cache coherency ioctls to userspace and most
	 * architectures make the CPU ops for cache flushing privileged.
	 * Therefore we require the underlying IOMMU to support CPU coherent
	 * operation. Support for IOMMU_CACHE is enforced by the
	 * IOMMU_CAP_CACHE_COHERENCY test during bind.
	 */
	int iommu_prot = IOMMU_CACHE;

	if (map_flags & IOMMU_IOAS_MAP_WRITEABLE)
		iommu_prot |= IOMMU_WRITE;
	if (map_flags & IOMMU_IOAS_MAP_READABLE)
		iommu_prot |= IOMMU_READ;
	return iommu_prot;
}
int iommufd_ioas_map(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_map *cmd = ucmd->cmd;
	unsigned long iova = cmd->iova;
	struct iommufd_ioas *ioas;
	unsigned int flags = 0;
	int rc;

	if ((cmd->flags &
	     ~(IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE |
	       IOMMU_IOAS_MAP_READABLE)) ||
	    cmd->__reserved)
		return -EOPNOTSUPP;
	if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX)
		return -EOVERFLOW;

	if (!(cmd->flags &
	      (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE)))
		return -EINVAL;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	/* Let the kernel pick the IOVA unless userspace fixed it */
	if (!(cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA))
		flags = IOPT_ALLOC_IOVA;
	rc = iopt_map_user_pages(ucmd->ictx, &ioas->iopt, &iova,
				 u64_to_user_ptr(cmd->user_va), cmd->length,
				 conv_iommu_prot(cmd->flags), flags);
	if (rc)
		goto out_put;

	cmd->iova = iova;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_put:
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}
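
/*
 * Illustrative userspace sketch (not part of this file), continuing the
 * examples above: mapping anonymous memory into the IOAS with
 * IOMMU_IOAS_MAP. Without IOMMU_IOAS_MAP_FIXED_IOVA the kernel chooses the
 * IOVA and returns it in .iova. Names follow uapi/linux/iommufd.h as
 * understood here.
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	struct iommu_ioas_map map_cmd = {
 *		.size = sizeof(map_cmd),
 *		.flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE,
 *		.ioas_id = alloc_cmd.out_ioas_id,
 *		.user_va = (uintptr_t)buf,
 *		.length = len,
 *	};
 *	if (ioctl(iommufd, IOMMU_IOAS_MAP, &map_cmd) == 0)
 *		; // map_cmd.iova now holds the kernel-chosen IOVA
 */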
int iommufd_ioas_copy(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_copy *cmd = ucmd->cmd;
	struct iommufd_ioas *src_ioas;
	struct iommufd_ioas *dst_ioas;
	unsigned int flags = 0;
	LIST_HEAD(pages_list);
	unsigned long iova;
	int rc;

	iommufd_test_syz_conv_iova_id(ucmd, cmd->src_ioas_id, &cmd->src_iova,
				      &cmd->flags);

	if ((cmd->flags &
	     ~(IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE |
	       IOMMU_IOAS_MAP_READABLE)))
		return -EOPNOTSUPP;
	if (cmd->length >= ULONG_MAX || cmd->src_iova >= ULONG_MAX ||
	    cmd->dst_iova >= ULONG_MAX)
		return -EOVERFLOW;

	if (!(cmd->flags &
	      (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE)))
		return -EINVAL;

	/* Collect the source pages, then drop the source IOAS reference */
	src_ioas = iommufd_get_ioas(ucmd->ictx, cmd->src_ioas_id);
	if (IS_ERR(src_ioas))
		return PTR_ERR(src_ioas);
	rc = iopt_get_pages(&src_ioas->iopt, cmd->src_iova, cmd->length,
			    &pages_list);
	iommufd_put_object(ucmd->ictx, &src_ioas->obj);
	if (rc)
		return rc;

	dst_ioas = iommufd_get_ioas(ucmd->ictx, cmd->dst_ioas_id);
	if (IS_ERR(dst_ioas)) {
		rc = PTR_ERR(dst_ioas);
		goto out_pages;
	}

	if (!(cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA))
		flags = IOPT_ALLOC_IOVA;
	iova = cmd->dst_iova;
	rc = iopt_map_pages(&dst_ioas->iopt, &pages_list, cmd->length, &iova,
			    conv_iommu_prot(cmd->flags), flags);
	if (rc)
		goto out_put_dst;

	cmd->dst_iova = iova;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_put_dst:
	iommufd_put_object(ucmd->ictx, &dst_ioas->obj);
out_pages:
	iopt_free_pages_list(&pages_list);
	return rc;
}
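
/*
 * Illustrative userspace sketch (not part of this file), continuing the
 * examples above: IOMMU_IOAS_COPY duplicates an already-mapped range from
 * one IOAS into another without re-pinning the pages. src_id/dst_id are
 * placeholder IOAS ids from earlier allocations; names follow
 * uapi/linux/iommufd.h as understood here.
 *
 *	struct iommu_ioas_copy copy_cmd = {
 *		.size = sizeof(copy_cmd),
 *		.flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE,
 *		.dst_ioas_id = dst_id,
 *		.src_ioas_id = src_id,
 *		.length = len,
 *		.src_iova = map_cmd.iova,	// range previously mapped in src
 *	};
 *	if (ioctl(iommufd, IOMMU_IOAS_COPY, &copy_cmd) == 0)
 *		; // copy_cmd.dst_iova holds the IOVA chosen in the destination
 */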
int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd)
{
	struct iommu_ioas_unmap *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	unsigned long unmapped = 0;
	int rc;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	/* iova == 0 with length == U64_MAX means "unmap everything" */
	if (cmd->iova == 0 && cmd->length == U64_MAX) {
		rc = iopt_unmap_all(&ioas->iopt, &unmapped);
		if (rc)
			goto out_put;
	} else {
		if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX) {
			rc = -EOVERFLOW;
			goto out_put;
		}
		rc = iopt_unmap_iova(&ioas->iopt, cmd->iova, cmd->length,
				     &unmapped);
		if (rc)
			goto out_put;
	}

	cmd->length = unmapped;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));

out_put:
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}
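
/*
 * Illustrative userspace sketch (not part of this file), continuing the
 * examples above: IOMMU_IOAS_UNMAP. Passing iova == 0 and
 * length == UINT64_MAX is the "unmap everything" convention handled above;
 * on return .length reports how many bytes were unmapped. Names follow
 * uapi/linux/iommufd.h as understood here.
 *
 *	struct iommu_ioas_unmap unmap_cmd = {
 *		.size = sizeof(unmap_cmd),
 *		.ioas_id = alloc_cmd.out_ioas_id,
 *		.iova = 0,
 *		.length = UINT64_MAX,	// unmap the whole IOAS
 *	};
 *	ioctl(iommufd, IOMMU_IOAS_UNMAP, &unmap_cmd);
 */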
int iommufd_option_rlimit_mode(struct iommu_option *cmd,
			       struct iommufd_ctx *ictx)
{
	if (cmd->object_id)
		return -EOPNOTSUPP;

	if (cmd->op == IOMMU_OPTION_OP_GET) {
		cmd->val64 = ictx->account_mode == IOPT_PAGES_ACCOUNT_MM;
		return 0;
	}
	if (cmd->op == IOMMU_OPTION_OP_SET) {
		int rc = 0;

		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		/* The accounting mode can only change while nothing exists */
		xa_lock(&ictx->objects);
		if (!xa_empty(&ictx->objects)) {
			rc = -EBUSY;
		} else {
			if (cmd->val64 == 0)
				ictx->account_mode = IOPT_PAGES_ACCOUNT_USER;
			else if (cmd->val64 == 1)
				ictx->account_mode = IOPT_PAGES_ACCOUNT_MM;
			else
				rc = -EINVAL;
		}
		xa_unlock(&ictx->objects);
		return rc;
	}
	return -EOPNOTSUPP;
}
static int iommufd_ioas_option_huge_pages(struct iommu_option *cmd,
					  struct iommufd_ioas *ioas)
{
	if (cmd->op == IOMMU_OPTION_OP_GET) {
		cmd->val64 = !ioas->iopt.disable_large_pages;
		return 0;
	}
	if (cmd->op == IOMMU_OPTION_OP_SET) {
		if (cmd->val64 == 0)
			return iopt_disable_large_pages(&ioas->iopt);
		if (cmd->val64 == 1) {
			iopt_enable_large_pages(&ioas->iopt);
			return 0;
		}
		return -EINVAL;
	}
	return -EOPNOTSUPP;
}
int iommufd_ioas_option(struct iommufd_ucmd *ucmd)
{
	struct iommu_option *cmd = ucmd->cmd;
	struct iommufd_ioas *ioas;
	int rc = 0;

	if (cmd->__reserved)
		return -EOPNOTSUPP;

	ioas = iommufd_get_ioas(ucmd->ictx, cmd->object_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);

	switch (cmd->option_id) {
	case IOMMU_OPTION_HUGE_PAGES:
		rc = iommufd_ioas_option_huge_pages(cmd, ioas);
		break;
	default:
		rc = -EOPNOTSUPP;
	}

	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}
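
/*
 * Illustrative userspace sketch (not part of this file), continuing the
 * examples above: toggling huge page use for one IOAS through IOMMU_OPTION.
 * object_id selects the IOAS; val64 == 0 disables large page sizes and 1
 * re-enables them. Names follow uapi/linux/iommufd.h as understood here.
 *
 *	struct iommu_option opt_cmd = {
 *		.size = sizeof(opt_cmd),
 *		.option_id = IOMMU_OPTION_HUGE_PAGES,
 *		.op = IOMMU_OPTION_OP_SET,
 *		.object_id = alloc_cmd.out_ioas_id,
 *		.val64 = 0,	// disable large pages for this IOAS
 *	};
 *	ioctl(iommufd, IOMMU_OPTION, &opt_cmd);
 */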