// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
 *
 * Kernel side components to support tools/testing/selftests/iommu
 */
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/fault-inject.h>
#include <linux/file.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "io_pagetable.h"
#include "iommufd_private.h"
#include "iommufd_test.h"

static DECLARE_FAULT_ATTR(fail_iommufd);
static struct dentry *dbgfs_root;
static struct platform_device *selftest_iommu_dev;
static const struct iommu_ops mock_ops;
static struct iommu_domain_ops domain_nested_ops;

size_t iommufd_test_memory_limit = 65536;

struct mock_bus_type {
	struct bus_type bus;
	struct notifier_block nb;
};

static struct mock_bus_type iommufd_mock_bus_type = {
	.bus = {
		.name = "iommufd_mock",
	},
};

static DEFINE_IDA(mock_dev_ida);

enum {
	MOCK_DIRTY_TRACK = 1,
	MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
	MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,

	/*
	 * Like a real page table, alignment requires the low bits of the
	 * address to be zero. xarray also requires the high bit to be zero,
	 * so we store the pfns shifted. The upper bits are used for metadata.
	 */
	MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,

	_MOCK_PFN_START = MOCK_PFN_MASK + 1,
	MOCK_PFN_START_IOVA = _MOCK_PFN_START,
	MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
	MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
	MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};

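/*
 * An xarray entry thus packs the shifted pfn in the bits covered by
 * MOCK_PFN_MASK with the per-page metadata above it, e.g.
 * xa_mk_value(pfn | MOCK_PFN_START_IOVA | MOCK_PFN_DIRTY_IOVA) describes a
 * page that begins a mapped range and has been written to.
 */
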
/*
 * Syzkaller has trouble randomizing the correct iova to use since it is linked
 * to the map ioctl's output, and it has no idea about that. So, simplify
 * things. In syzkaller mode the 64 bit IOVA is converted into an nth area and
 * offset value. This has a much smaller randomization space and syzkaller can
 * hit it.
 */
static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
						  u64 *iova)
{
	struct syz_layout {
		__u32 nth_area;
		__u32 offset;
	};
	struct syz_layout *syz = (void *)iova;
	unsigned int nth = syz->nth_area;
	struct iopt_area *area;

	down_read(&iopt->iova_rwsem);
	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
		if (nth == 0) {
			up_read(&iopt->iova_rwsem);
			return iopt_area_iova(area) + syz->offset;
		}
		nth--;
	}
	up_read(&iopt->iova_rwsem);

	return 0;
}

static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
						u64 *iova)
{
	unsigned long ret;

	mutex_lock(&access->ioas_lock);
	if (!access->ioas) {
		mutex_unlock(&access->ioas_lock);
		return 0;
	}
	ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
	mutex_unlock(&access->ioas_lock);
	return ret;
}

void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags)
{
	struct iommufd_ioas *ioas;

	if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
		return;
	*flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;

	ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
	if (IS_ERR(ioas))
		return;
	*iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
	iommufd_put_object(ucmd->ictx, &ioas->obj);
}

struct mock_iommu_domain {
	unsigned long flags;
	struct iommu_domain domain;
	struct xarray pfns;
};

struct mock_iommu_domain_nested {
	struct iommu_domain domain;
	struct mock_iommu_domain *parent;
	u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM];
};

enum selftest_obj_type {
	TYPE_IDEV,
};

struct mock_dev {
	struct device dev;
	unsigned long flags;
	int id;
};

struct selftest_obj {
	struct iommufd_object obj;
	enum selftest_obj_type type;

	union {
		struct {
			struct iommufd_device *idev;
			struct iommufd_ctx *ictx;
			struct mock_dev *mock_dev;
		} idev;
	};
};

static int mock_domain_nop_attach(struct iommu_domain *domain,
				  struct device *dev)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);

	if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
		return -EINVAL;

	return 0;
}

static const struct iommu_domain_ops mock_blocking_ops = {
	.attach_dev = mock_domain_nop_attach,
};

static struct iommu_domain mock_blocking_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &mock_blocking_ops,
};

static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type)
{
	struct iommu_test_hw_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	info->test_reg = IOMMU_HW_INFO_SELFTEST_REGVAL;
	*length = sizeof(*info);
	*type = IOMMU_HW_INFO_TYPE_SELFTEST;

	return info;
}

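/*
 * Dirty tracking is emulated purely in software: enabling it just sets
 * MOCK_DIRTY_TRACK in mock->flags, and enabling is refused when the domain
 * was allocated without dirty_ops.
 */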
static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
					  bool enable)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	unsigned long flags = mock->flags;

	if (enable && !domain->dirty_ops)
		return -EINVAL;

	/* No change? */
	if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK)))
		return 0;

	flags = (enable ? flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);

	mock->flags = flags;
	return 0;
}

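/*
 * Walk the MOCK_IO_PAGE_SIZE slots covering [iova, iova + page_size) and
 * report whether any carried MOCK_PFN_DIRTY_IOVA, clearing the bit unless the
 * caller passed IOMMU_DIRTY_NO_CLEAR.
 */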
static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
				      unsigned long iova, size_t page_size,
				      unsigned long flags)
{
	unsigned long cur, end = iova + page_size - 1;
	bool dirty = false;
	void *ent, *old;

	for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
		ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
		if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
			continue;

		dirty = true;
		/* Clear dirty */
		if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
			unsigned long val;

			val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
			old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
				       xa_mk_value(val), GFP_KERNEL);
			WARN_ON_ONCE(ent != old);
		}
	}

	return dirty;
}

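/*
 * Scan the requested range, widening to MOCK_HUGE_PAGE_SIZE granularity when
 * an entry is marked MOCK_PFN_HUGE_IOVA, and record every dirty page in the
 * caller's iommu_dirty_bitmap.
 */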
static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
					     unsigned long iova, size_t size,
					     unsigned long flags,
					     struct iommu_dirty_bitmap *dirty)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	unsigned long end = iova + size;
	void *ent;

	if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
		return -EINVAL;

	do {
		unsigned long pgsize = MOCK_IO_PAGE_SIZE;
		unsigned long head;

		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
		if (!ent) {
			iova += pgsize;
			continue;
		}

		if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
			pgsize = MOCK_HUGE_PAGE_SIZE;
		head = iova & ~(pgsize - 1);

		/* Clear dirty */
		if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
			iommu_dirty_bitmap_record(dirty, iova, pgsize);
		iova += pgsize;
	} while (iova < end);

	return 0;
}

static const struct iommu_dirty_ops dirty_ops = {
	.set_dirty_tracking = mock_domain_set_dirty_tracking,
	.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
};

static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
	struct mock_iommu_domain *mock;

	mock = kzalloc(sizeof(*mock), GFP_KERNEL);
	if (!mock)
		return NULL;
	mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
	mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
	mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
	if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
		mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
	mock->domain.ops = mock_ops.default_domain_ops;
	mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
	xa_init(&mock->pfns);
	return &mock->domain;
}

static struct iommu_domain *
__mock_domain_alloc_nested(struct mock_iommu_domain *mock_parent,
			   const struct iommu_hwpt_selftest *user_cfg)
{
	struct mock_iommu_domain_nested *mock_nested;
	int i;

	mock_nested = kzalloc(sizeof(*mock_nested), GFP_KERNEL);
	if (!mock_nested)
		return ERR_PTR(-ENOMEM);
	mock_nested->parent = mock_parent;
	mock_nested->domain.ops = &domain_nested_ops;
	mock_nested->domain.type = IOMMU_DOMAIN_NESTED;
	for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)
		mock_nested->iotlb[i] = user_cfg->iotlb;
	return &mock_nested->domain;
}

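/*
 * With no parent this allocates a paging domain (optionally with dirty_ops),
 * otherwise it validates the parent and user_data and allocates a nested
 * domain seeded with the user supplied iotlb value.
 */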
static struct iommu_domain *
mock_domain_alloc_user(struct device *dev, u32 flags,
		       struct iommu_domain *parent,
		       const struct iommu_user_data *user_data)
{
	struct mock_iommu_domain *mock_parent;
	struct iommu_hwpt_selftest user_cfg;
	int rc;

	/* must be mock_domain */
	if (!parent) {
		struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
		bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
		bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
		struct iommu_domain *domain;

		if (flags & (~(IOMMU_HWPT_ALLOC_NEST_PARENT |
			       IOMMU_HWPT_ALLOC_DIRTY_TRACKING)))
			return ERR_PTR(-EOPNOTSUPP);
		if (user_data || (has_dirty_flag && no_dirty_ops))
			return ERR_PTR(-EOPNOTSUPP);
		domain = mock_domain_alloc_paging(dev);
		if (!domain)
			return ERR_PTR(-ENOMEM);
		if (has_dirty_flag)
			container_of(domain, struct mock_iommu_domain, domain)
				->domain.dirty_ops = &dirty_ops;
		return domain;
	}

	/* must be mock_domain_nested */
	if (user_data->type != IOMMU_HWPT_DATA_SELFTEST || flags)
		return ERR_PTR(-EOPNOTSUPP);
	if (!parent || parent->ops != mock_ops.default_domain_ops)
		return ERR_PTR(-EINVAL);

	mock_parent = container_of(parent, struct mock_iommu_domain, domain);
	if (!mock_parent)
		return ERR_PTR(-EINVAL);

	rc = iommu_copy_struct_from_user(&user_cfg, user_data,
					 IOMMU_HWPT_DATA_SELFTEST, iotlb);
	if (rc)
		return ERR_PTR(rc);

	return __mock_domain_alloc_nested(mock_parent, &user_cfg);
}

static void mock_domain_free(struct iommu_domain *domain)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);

	WARN_ON(!xa_empty(&mock->pfns));
	kfree(mock);
}

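/*
 * The "page table" is an xarray indexed by iova / MOCK_IO_PAGE_SIZE; each
 * mapped slot stores the shifted pfn together with the START/LAST/HUGE
 * metadata so unmap can sanity check the ranges it is given.
 */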
static int mock_domain_map_pages(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t paddr,
				 size_t pgsize, size_t pgcount, int prot,
				 gfp_t gfp, size_t *mapped)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	unsigned long flags = MOCK_PFN_START_IOVA;
	unsigned long start_iova = iova;

	/*
	 * xarray does not reliably work with fault injection because it does a
	 * retry allocation, so put our own failure point.
	 */
	if (iommufd_should_fail())
		return -ENOENT;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			void *old;

			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
				flags = MOCK_PFN_LAST_IOVA;
			if (pgsize != MOCK_IO_PAGE_SIZE) {
				flags |= MOCK_PFN_HUGE_IOVA;
			}
			old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
				       xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
						   flags),
				       gfp);
			if (xa_is_err(old)) {
				for (; start_iova != iova;
				     start_iova += MOCK_IO_PAGE_SIZE)
					xa_erase(&mock->pfns,
						 start_iova /
							 MOCK_IO_PAGE_SIZE);
				return xa_err(old);
			}
			WARN_ON(old);
			iova += MOCK_IO_PAGE_SIZE;
			paddr += MOCK_IO_PAGE_SIZE;
			*mapped += MOCK_IO_PAGE_SIZE;
			flags = 0;
		}
	}
	return 0;
}

static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
				      unsigned long iova, size_t pgsize,
				      size_t pgcount,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	bool first = true;
	size_t ret = 0;
	void *ent;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);

	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);

			/*
			 * iommufd generates unmaps that must be a strict
			 * superset of the maps performed. So every
			 * starting/ending IOVA should have been an iova passed
			 * to map.
			 *
			 * This simple logic doesn't work when the HUGE_PAGE is
			 * turned on since the core code will automatically
			 * switch between the two page sizes creating a break in
			 * the unmap calls. The break can land in the middle of
			 * a huge page.
			 */
			if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
				if (first) {
					WARN_ON(ent && !(xa_to_value(ent) &
							 MOCK_PFN_START_IOVA));
					first = false;
				}
				if (pgcount == 1 &&
				    cur + MOCK_IO_PAGE_SIZE == pgsize)
					WARN_ON(ent && !(xa_to_value(ent) &
							 MOCK_PFN_LAST_IOVA));
			}

			iova += MOCK_IO_PAGE_SIZE;
			ret += MOCK_IO_PAGE_SIZE;
		}
	}
	return ret;
}

static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	void *ent;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
	WARN_ON(!ent);
	return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
}

static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);

	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_DIRTY_TRACKING:
		return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY);
	default:
		break;
	}

	return false;
}

static struct iopf_queue *mock_iommu_iopf_queue;

static struct iommu_device mock_iommu_device = {
};

static struct iommu_device *mock_probe_device(struct device *dev)
{
	if (dev->bus != &iommufd_mock_bus_type.bus)
		return ERR_PTR(-ENODEV);
	return &mock_iommu_device;
}

static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt,
				      struct iommu_page_response *msg)
{
}

static int mock_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
{
	if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
		return -ENODEV;

	return iopf_queue_add_device(mock_iommu_iopf_queue, dev);
}

static int mock_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
{
	if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
		return -ENODEV;

	iopf_queue_remove_device(mock_iommu_iopf_queue, dev);

	return 0;
}

static const struct iommu_ops mock_ops = {
	/*
	 * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type()
	 * because it is zero.
	 */
	.default_domain = &mock_blocking_domain,
	.blocked_domain = &mock_blocking_domain,
	.owner = THIS_MODULE,
	.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
	.hw_info = mock_domain_hw_info,
	.domain_alloc_paging = mock_domain_alloc_paging,
	.domain_alloc_user = mock_domain_alloc_user,
	.capable = mock_domain_capable,
	.device_group = generic_device_group,
	.probe_device = mock_probe_device,
	.page_response = mock_domain_page_response,
	.dev_enable_feat = mock_dev_enable_feat,
	.dev_disable_feat = mock_dev_disable_feat,
	.user_pasid_table = true,
	.default_domain_ops =
		&(struct iommu_domain_ops){
			.free = mock_domain_free,
			.attach_dev = mock_domain_nop_attach,
			.map_pages = mock_domain_map_pages,
			.unmap_pages = mock_domain_unmap_pages,
			.iova_to_phys = mock_domain_iova_to_phys,
		},
};

static void mock_domain_free_nested(struct iommu_domain *domain)
{
	struct mock_iommu_domain_nested *mock_nested =
		container_of(domain, struct mock_iommu_domain_nested, domain);

	kfree(mock_nested);
}

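/*
 * Consume an array of invalidation requests from userspace: each entry either
 * clears one iotlb slot or, with IOMMU_TEST_INVALIDATE_FLAG_ALL, clears them
 * all. The number of entries actually processed is written back to
 * array->entry_num.
 */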
static int
mock_domain_cache_invalidate_user(struct iommu_domain *domain,
				  struct iommu_user_data_array *array)
{
	struct mock_iommu_domain_nested *mock_nested =
		container_of(domain, struct mock_iommu_domain_nested, domain);
	struct iommu_hwpt_invalidate_selftest inv;
	u32 processed = 0;
	int i = 0, j;
	int rc = 0;

	if (array->type != IOMMU_HWPT_INVALIDATE_DATA_SELFTEST) {
		rc = -EINVAL;
		goto out;
	}

	for ( ; i < array->entry_num; i++) {
		rc = iommu_copy_struct_from_user_array(&inv, array,
						       IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
						       i, iotlb_id);
		if (rc)
			break;

		if (inv.flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
			rc = -EOPNOTSUPP;
			break;
		}

		if (inv.iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX) {
			rc = -EINVAL;
			break;
		}

		if (inv.flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
			/* Invalidate all mock iotlb entries and ignore iotlb_id */
			for (j = 0; j < MOCK_NESTED_DOMAIN_IOTLB_NUM; j++)
				mock_nested->iotlb[j] = 0;
		} else {
			mock_nested->iotlb[inv.iotlb_id] = 0;
		}

		processed++;
	}

out:
	array->entry_num = processed;
	return rc;
}

static struct iommu_domain_ops domain_nested_ops = {
	.free = mock_domain_free_nested,
	.attach_dev = mock_domain_nop_attach,
	.cache_invalidate_user = mock_domain_cache_invalidate_user,
};

static inline struct iommufd_hw_pagetable *
__get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, u32 hwpt_type)
{
	struct iommufd_object *obj;

	obj = iommufd_get_object(ucmd->ictx, mockpt_id, hwpt_type);
	if (IS_ERR(obj))
		return ERR_CAST(obj);
	return container_of(obj, struct iommufd_hw_pagetable, obj);
}

static inline struct iommufd_hw_pagetable *
get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
		 struct mock_iommu_domain **mock)
{
	struct iommufd_hw_pagetable *hwpt;

	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_PAGING);
	if (IS_ERR(hwpt))
		return hwpt;
	if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
	    hwpt->domain->ops != mock_ops.default_domain_ops) {
		iommufd_put_object(ucmd->ictx, &hwpt->obj);
		return ERR_PTR(-EINVAL);
	}
	*mock = container_of(hwpt->domain, struct mock_iommu_domain, domain);
	return hwpt;
}

static inline struct iommufd_hw_pagetable *
get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
			struct mock_iommu_domain_nested **mock_nested)
{
	struct iommufd_hw_pagetable *hwpt;

	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_NESTED);
	if (IS_ERR(hwpt))
		return hwpt;
	if (hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
	    hwpt->domain->ops != &domain_nested_ops) {
		iommufd_put_object(ucmd->ictx, &hwpt->obj);
		return ERR_PTR(-EINVAL);
	}
	*mock_nested = container_of(hwpt->domain,
				    struct mock_iommu_domain_nested, domain);
	return hwpt;
}

static void mock_dev_release(struct device *dev)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);

	ida_free(&mock_dev_ida, mdev->id);
	kfree(mdev);
}

static struct mock_dev *mock_dev_create(unsigned long dev_flags)
{
	struct mock_dev *mdev;
	int rc;

	if (dev_flags &
	    ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA))
		return ERR_PTR(-EINVAL);

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return ERR_PTR(-ENOMEM);

	device_initialize(&mdev->dev);
	mdev->flags = dev_flags;
	mdev->dev.release = mock_dev_release;
	mdev->dev.bus = &iommufd_mock_bus_type.bus;

	rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
	if (rc < 0)
		goto err_put;
	mdev->id = rc;

	rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
	if (rc)
		goto err_put;

	rc = device_add(&mdev->dev);
	if (rc)
		goto err_put;
	return mdev;

err_put:
	put_device(&mdev->dev);
	return ERR_PTR(rc);
}

static void mock_dev_destroy(struct mock_dev *mdev)
{
	device_unregister(&mdev->dev);
}

bool iommufd_selftest_is_mock_dev(struct device *dev)
{
	return dev->release == mock_dev_release;
}

/* Create an hw_pagetable with the mock domain so we can test the domain ops */
static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
				    struct iommu_test_cmd *cmd)
{
	struct iommufd_device *idev;
	struct selftest_obj *sobj;
	u32 pt_id = cmd->id;
	u32 dev_flags = 0;
	u32 idev_id;
	int rc;

	sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
	if (IS_ERR(sobj))
		return PTR_ERR(sobj);

	sobj->idev.ictx = ucmd->ictx;
	sobj->type = TYPE_IDEV;

	if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS)
		dev_flags = cmd->mock_domain_flags.dev_flags;

	sobj->idev.mock_dev = mock_dev_create(dev_flags);
	if (IS_ERR(sobj->idev.mock_dev)) {
		rc = PTR_ERR(sobj->idev.mock_dev);
		goto out_sobj;
	}

	idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev,
				   &idev_id);
	if (IS_ERR(idev)) {
		rc = PTR_ERR(idev);
		goto out_mdev;
	}
	sobj->idev.idev = idev;

	rc = iommufd_device_attach(idev, &pt_id);
	if (rc)
		goto out_unbind;

	/* Userspace must destroy the device_id to destroy the object */
	cmd->mock_domain.out_hwpt_id = pt_id;
	cmd->mock_domain.out_stdev_id = sobj->obj.id;
	cmd->mock_domain.out_idev_id = idev_id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_detach;
	iommufd_object_finalize(ucmd->ictx, &sobj->obj);
	return 0;

out_detach:
	iommufd_device_detach(idev);
out_unbind:
	iommufd_device_unbind(idev);
out_mdev:
	mock_dev_destroy(sobj->idev.mock_dev);
out_sobj:
	iommufd_object_abort(ucmd->ictx, &sobj->obj);
	return rc;
}

/* Replace the mock domain with a manually allocated hw_pagetable */
static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
					    unsigned int device_id, u32 pt_id,
					    struct iommu_test_cmd *cmd)
{
	struct iommufd_object *dev_obj;
	struct selftest_obj *sobj;
	int rc;

	/*
	 * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
	 * it doesn't race with detach, which is not allowed.
	 */
	dev_obj =
		iommufd_get_object(ucmd->ictx, device_id, IOMMUFD_OBJ_SELFTEST);
	if (IS_ERR(dev_obj))
		return PTR_ERR(dev_obj);

	sobj = container_of(dev_obj, struct selftest_obj, obj);
	if (sobj->type != TYPE_IDEV) {
		rc = -EINVAL;
		goto out_dev_obj;
	}

	rc = iommufd_device_replace(sobj->idev.idev, &pt_id);
	if (rc)
		goto out_dev_obj;

	cmd->mock_domain_replace.pt_id = pt_id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));

out_dev_obj:
	iommufd_put_object(ucmd->ictx, dev_obj);
	return rc;
}

/* Add an additional reserved IOVA to the IOAS */
static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
				     unsigned int mockpt_id,
				     unsigned long start, size_t length)
{
	struct iommufd_ioas *ioas;
	int rc;

	ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);
	down_write(&ioas->iopt.iova_rwsem);
	rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
	up_write(&ioas->iopt.iova_rwsem);
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

/* Check that every pfn under each iova matches the pfn under a user VA */
static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
				    unsigned int mockpt_id, unsigned long iova,
				    size_t length, void __user *uptr)
{
	struct iommufd_hw_pagetable *hwpt;
	struct mock_iommu_domain *mock;
	uintptr_t end;
	int rc;

	if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
	    (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
		return -EINVAL;

	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	for (; length; length -= MOCK_IO_PAGE_SIZE) {
		struct page *pages[1];
		unsigned long pfn;
		long npages;
		void *ent;

		npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
					     pages);
		if (npages < 0) {
			rc = npages;
			goto out_put;
		}
		if (WARN_ON(npages != 1)) {
			rc = -EFAULT;
			goto out_put;
		}
		pfn = page_to_pfn(pages[0]);
		put_page(pages[0]);

		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
		if (!ent ||
		    (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE !=
			    pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
			rc = -EINVAL;
			goto out_put;
		}
		iova += MOCK_IO_PAGE_SIZE;
		uptr += MOCK_IO_PAGE_SIZE;
	}
	rc = 0;

out_put:
	iommufd_put_object(ucmd->ictx, &hwpt->obj);
	return rc;
}

/* Check that the page ref count matches, to look for missing pin/unpins */
static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
				      void __user *uptr, size_t length,
				      unsigned int refs)
{
	uintptr_t end;

	if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
		return -EINVAL;

	for (; length; length -= PAGE_SIZE) {
		struct page *pages[1];
		long npages;

		npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages);
		if (npages < 0)
			return npages;
		if (WARN_ON(npages != 1))
			return -EFAULT;
		if (!PageCompound(pages[0])) {
			unsigned int count;

			count = page_ref_count(pages[0]);
			if (count / GUP_PIN_COUNTING_BIAS != refs) {
				put_page(pages[0]);
				return -EIO;
			}
		}
		put_page(pages[0]);
		uptr += PAGE_SIZE;
	}
	return 0;
}

static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd,
				       u32 mockpt_id, unsigned int iotlb_id,
				       u32 iotlb)
{
	struct mock_iommu_domain_nested *mock_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc = 0;

	hwpt = get_md_pagetable_nested(ucmd, mockpt_id, &mock_nested);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	mock_nested = container_of(hwpt->domain,
				   struct mock_iommu_domain_nested, domain);

	if (iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX ||
	    mock_nested->iotlb[iotlb_id] != iotlb)
		rc = -EINVAL;
	iommufd_put_object(ucmd->ictx, &hwpt->obj);
	return rc;
}

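/*
 * A selftest_access wraps an iommufd_access behind an anonymous file handed
 * to userspace; each pinned range is remembered as a selftest_access_item so
 * it can be unpinned later by id or by an unmap callback.
 */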
struct selftest_access {
	struct iommufd_access *access;
	struct file *file;
	struct mutex lock;
	struct list_head items;
	unsigned int next_id;
	bool destroying;
};

struct selftest_access_item {
	struct list_head items_elm;
	unsigned long iova;
	size_t length;
	unsigned int id;
};

static const struct file_operations iommfd_test_staccess_fops;

static struct selftest_access *iommufd_access_get(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADFD);

	if (file->f_op != &iommfd_test_staccess_fops) {
		fput(file);
		return ERR_PTR(-EBADFD);
	}
	return file->private_data;
}

static void iommufd_test_access_unmap(void *data, unsigned long iova,
				      unsigned long length)
{
	unsigned long iova_last = iova + length - 1;
	struct selftest_access *staccess = data;
	struct selftest_access_item *item;
	struct selftest_access_item *tmp;

	mutex_lock(&staccess->lock);
	list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) {
		if (iova > item->iova + item->length - 1 ||
		    iova_last < item->iova)
			continue;
		list_del(&item->items_elm);
		iommufd_access_unpin_pages(staccess->access, item->iova,
					   item->length);
		kfree(item);
	}
	mutex_unlock(&staccess->lock);
}

static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd,
					    unsigned int access_id,
					    unsigned int item_id)
{
	struct selftest_access_item *item;
	struct selftest_access *staccess;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	mutex_lock(&staccess->lock);
	list_for_each_entry(item, &staccess->items, items_elm) {
		if (item->id == item_id) {
			list_del(&item->items_elm);
			iommufd_access_unpin_pages(staccess->access, item->iova,
						   item->length);
			mutex_unlock(&staccess->lock);
			kfree(item);
			fput(staccess->file);
			return 0;
		}
	}
	mutex_unlock(&staccess->lock);
	fput(staccess->file);
	return -ENOENT;
}

static int iommufd_test_staccess_release(struct inode *inode,
					 struct file *filep)
{
	struct selftest_access *staccess = filep->private_data;

	if (staccess->access) {
		iommufd_test_access_unmap(staccess, 0, ULONG_MAX);
		iommufd_access_destroy(staccess->access);
	}
	mutex_destroy(&staccess->lock);
	kfree(staccess);
	return 0;
}

static const struct iommufd_access_ops selftest_access_ops_pin = {
	.needs_pin_pages = 1,
	.unmap = iommufd_test_access_unmap,
};

static const struct iommufd_access_ops selftest_access_ops = {
	.unmap = iommufd_test_access_unmap,
};

static const struct file_operations iommfd_test_staccess_fops = {
	.release = iommufd_test_staccess_release,
};

static struct selftest_access *iommufd_test_alloc_access(void)
{
	struct selftest_access *staccess;
	struct file *filep;

	staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT);
	if (!staccess)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&staccess->items);
	mutex_init(&staccess->lock);

	filep = anon_inode_getfile("[iommufd_test_staccess]",
				   &iommfd_test_staccess_fops, staccess,
				   O_RDWR);
	if (IS_ERR(filep)) {
		kfree(staccess);
		return ERR_CAST(filep);
	}
	staccess->file = filep;
	return staccess;
}

static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
				      unsigned int ioas_id, unsigned int flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access *staccess;
	struct iommufd_access *access;
	u32 id;
	int fdno;
	int rc;

	if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)
		return -EOPNOTSUPP;

	staccess = iommufd_test_alloc_access();
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	fdno = get_unused_fd_flags(O_CLOEXEC);
	if (fdno < 0) {
		rc = -ENOMEM;
		goto out_free_staccess;
	}

	access = iommufd_access_create(
		ucmd->ictx,
		(flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
			&selftest_access_ops_pin :
			&selftest_access_ops,
		staccess, &id);
	if (IS_ERR(access)) {
		rc = PTR_ERR(access);
		goto out_put_fdno;
	}
	rc = iommufd_access_attach(access, ioas_id);
	if (rc)
		goto out_destroy;
	cmd->create_access.out_access_fd = fdno;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_destroy;

	staccess->access = access;
	fd_install(fdno, staccess->file);
	return 0;

out_destroy:
	iommufd_access_destroy(access);
out_put_fdno:
	put_unused_fd(fdno);
out_free_staccess:
	fput(staccess->file);
	return rc;
}

static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd,
					    unsigned int access_id,
					    unsigned int ioas_id)
{
	struct selftest_access *staccess;
	int rc;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	rc = iommufd_access_replace(staccess->access, ioas_id);
	fput(staccess->file);
	return rc;
}

/* Check that the pages in a page array match the pages in the user VA */
static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
				    size_t npages)
{
	for (; npages; npages--) {
		struct page *tmp_pages[1];
		long rc;

		rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages);
		if (rc < 0)
			return rc;
		if (WARN_ON(rc != 1))
			return -EFAULT;
		put_page(tmp_pages[0]);
		if (tmp_pages[0] != *pages)
			return -EBADE;
		pages++;
		uptr += PAGE_SIZE;
	}
	return 0;
}

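/*
 * Pin a range of user memory through the access, verify the resulting page
 * array against the user VA, and record it as an item that can later be
 * destroyed by id.
 */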
static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
				     unsigned int access_id, unsigned long iova,
				     size_t length, void __user *uptr,
				     u32 flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access_item *item;
	struct selftest_access *staccess;
	struct page **pages;
	size_t npages;
	int rc;

	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
	if (length > 16*1024*1024)
		return -ENOMEM;

	if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ))
		return -EOPNOTSUPP;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	if (staccess->access->ops != &selftest_access_ops_pin) {
		rc = -EOPNOTSUPP;
		goto out_put;
	}

	if (flags & MOCK_FLAGS_ACCESS_SYZ)
		iova = iommufd_test_syz_conv_iova(staccess->access,
						  &cmd->access_pages.iova);

	npages = (ALIGN(iova + length, PAGE_SIZE) -
		  ALIGN_DOWN(iova, PAGE_SIZE)) /
		 PAGE_SIZE;
	pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT);
	if (!pages) {
		rc = -ENOMEM;
		goto out_put;
	}

	/*
	 * Drivers will need to think very carefully about this locking. The
	 * core code can do multiple unmaps instantaneously after
	 * iommufd_access_pin_pages() and *all* the unmaps must not return until
	 * the range is unpinned. This simple implementation puts a global lock
	 * around the pin, which may not suit drivers that want this to be a
	 * performance path. Drivers that get this wrong will trigger WARN_ON
	 * races and cause EDEADLOCK failures to userspace.
	 */
	mutex_lock(&staccess->lock);
	rc = iommufd_access_pin_pages(staccess->access, iova, length, pages,
				      flags & MOCK_FLAGS_ACCESS_WRITE);
	if (rc)
		goto out_unlock;

	/* For syzkaller allow uptr to be NULL to skip this check */
	if (uptr) {
		rc = iommufd_test_check_pages(
			uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,
			npages);
		if (rc)
			goto out_unaccess;
	}

	item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT);
	if (!item) {
		rc = -ENOMEM;
		goto out_unaccess;
	}

	item->iova = iova;
	item->length = length;
	item->id = staccess->next_id++;
	list_add_tail(&item->items_elm, &staccess->items);

	cmd->access_pages.out_access_pages_id = item->id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_free_item;
	goto out_unlock;

out_free_item:
	list_del(&item->items_elm);
	kfree(item);
out_unaccess:
	iommufd_access_unpin_pages(staccess->access, iova, length);
out_unlock:
	mutex_unlock(&staccess->lock);
	kvfree(pages);
out_put:
	fput(staccess->file);
	return rc;
}

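/*
 * Exercise iommufd_access_rw() through a kernel bounce buffer, copying from
 * the user buffer on writes and back to it on reads.
 */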
static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
				  unsigned int access_id, unsigned long iova,
				  size_t length, void __user *ubuf,
				  unsigned int flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access *staccess;
	void *tmp;
	int rc;

	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
	if (length > 16*1024*1024)
		return -ENOMEM;

	if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
		      MOCK_FLAGS_ACCESS_SYZ))
		return -EOPNOTSUPP;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT);
	if (!tmp) {
		rc = -ENOMEM;
		goto out_put;
	}

	if (flags & MOCK_ACCESS_RW_WRITE) {
		if (copy_from_user(tmp, ubuf, length)) {
			rc = -EFAULT;
			goto out_free;
		}
	}

	if (flags & MOCK_FLAGS_ACCESS_SYZ)
		iova = iommufd_test_syz_conv_iova(staccess->access,
						  &cmd->access_rw.iova);

	rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
	if (rc)
		goto out_free;
	if (!(flags & MOCK_ACCESS_RW_WRITE)) {
		if (copy_to_user(ubuf, tmp, length)) {
			rc = -EFAULT;
			goto out_free;
		}
	}

out_free:
	kvfree(tmp);
out_put:
	fput(staccess->file);
	return rc;
}
static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE);
static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH ==
	      __IOMMUFD_ACCESS_RW_SLOW_PATH);

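/*
 * Mark the pages selected by the user supplied bitmap as dirty in the mock
 * domain so a later read_and_clear pass can observe them.
 */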
static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
			      unsigned long iova, size_t length,
			      unsigned long page_size, void __user *uptr,
			      u32 flags)
{
	unsigned long i, max;
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct iommufd_hw_pagetable *hwpt;
	struct mock_iommu_domain *mock;
	int rc, count = 0;
	void *tmp;

	if (!page_size || !length || iova % page_size || length % page_size ||
	    !uptr)
		return -EINVAL;

	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	if (!(mock->flags & MOCK_DIRTY_TRACK)) {
		rc = -EINVAL;
		goto out_put;
	}

	max = length / page_size;
	tmp = kvzalloc(DIV_ROUND_UP(max, BITS_PER_LONG) * sizeof(unsigned long),
		       GFP_KERNEL_ACCOUNT);
	if (!tmp) {
		rc = -ENOMEM;
		goto out_put;
	}

	if (copy_from_user(tmp, uptr, DIV_ROUND_UP(max, BITS_PER_BYTE))) {
		rc = -EFAULT;
		goto out_free;
	}

	for (i = 0; i < max; i++) {
		unsigned long cur = iova + i * page_size;
		void *ent, *old;

		if (!test_bit(i, (unsigned long *)tmp))
			continue;

		ent = xa_load(&mock->pfns, cur / page_size);
		if (ent) {
			unsigned long val;

			val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
			old = xa_store(&mock->pfns, cur / page_size,
				       xa_mk_value(val), GFP_KERNEL);
			WARN_ON_ONCE(ent != old);
			count++;
		}
	}

	cmd->dirty.out_nr_dirty = count;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_free:
	kvfree(tmp);
out_put:
	iommufd_put_object(ucmd->ictx, &hwpt->obj);
	return rc;
}

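/*
 * Inject a synthetic page request fault for the mock device so the iopf
 * delivery and response paths can be exercised from userspace.
 */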
static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd,
				     struct iommu_test_cmd *cmd)
{
	struct iopf_fault event = { };
	struct iommufd_device *idev;

	idev = iommufd_get_device(ucmd, cmd->trigger_iopf.dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	event.fault.prm.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
	if (cmd->trigger_iopf.pasid != IOMMU_NO_PASID)
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
	event.fault.type = IOMMU_FAULT_PAGE_REQ;
	event.fault.prm.addr = cmd->trigger_iopf.addr;
	event.fault.prm.pasid = cmd->trigger_iopf.pasid;
	event.fault.prm.grpid = cmd->trigger_iopf.grpid;
	event.fault.prm.perm = cmd->trigger_iopf.perm;

	iommu_report_device_fault(idev->dev, &event);
	iommufd_put_object(ucmd->ictx, &idev->obj);

	return 0;
}

void iommufd_selftest_destroy(struct iommufd_object *obj)
{
	struct selftest_obj *sobj = container_of(obj, struct selftest_obj, obj);

	switch (sobj->type) {
	case TYPE_IDEV:
		iommufd_device_detach(sobj->idev.idev);
		iommufd_device_unbind(sobj->idev.idev);
		mock_dev_destroy(sobj->idev.mock_dev);
		break;
	}
}

int iommufd_test(struct iommufd_ucmd *ucmd)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;

	switch (cmd->op) {
	case IOMMU_TEST_OP_ADD_RESERVED:
		return iommufd_test_add_reserved(ucmd, cmd->id,
						 cmd->add_reserved.start,
						 cmd->add_reserved.length);
	case IOMMU_TEST_OP_MOCK_DOMAIN:
	case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS:
		return iommufd_test_mock_domain(ucmd, cmd);
	case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE:
		return iommufd_test_mock_domain_replace(
			ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd);
	case IOMMU_TEST_OP_MD_CHECK_MAP:
		return iommufd_test_md_check_pa(
			ucmd, cmd->id, cmd->check_map.iova,
			cmd->check_map.length,
			u64_to_user_ptr(cmd->check_map.uptr));
	case IOMMU_TEST_OP_MD_CHECK_REFS:
		return iommufd_test_md_check_refs(
			ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
			cmd->check_refs.length, cmd->check_refs.refs);
	case IOMMU_TEST_OP_MD_CHECK_IOTLB:
		return iommufd_test_md_check_iotlb(ucmd, cmd->id,
						   cmd->check_iotlb.id,
						   cmd->check_iotlb.iotlb);
	case IOMMU_TEST_OP_CREATE_ACCESS:
		return iommufd_test_create_access(ucmd, cmd->id,
						  cmd->create_access.flags);
	case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS:
		return iommufd_test_access_replace_ioas(
			ucmd, cmd->id, cmd->access_replace_ioas.ioas_id);
	case IOMMU_TEST_OP_ACCESS_PAGES:
		return iommufd_test_access_pages(
			ucmd, cmd->id, cmd->access_pages.iova,
			cmd->access_pages.length,
			u64_to_user_ptr(cmd->access_pages.uptr),
			cmd->access_pages.flags);
	case IOMMU_TEST_OP_ACCESS_RW:
		return iommufd_test_access_rw(
			ucmd, cmd->id, cmd->access_rw.iova,
			cmd->access_rw.length,
			u64_to_user_ptr(cmd->access_rw.uptr),
			cmd->access_rw.flags);
	case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES:
		return iommufd_test_access_item_destroy(
			ucmd, cmd->id, cmd->destroy_access_pages.access_pages_id);
	case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
		/* Protect _batch_init(), can not be less than elmsz */
		if (cmd->memory_limit.limit <
		    sizeof(unsigned long) + sizeof(u32))
			return -EINVAL;
		iommufd_test_memory_limit = cmd->memory_limit.limit;
		return 0;
	case IOMMU_TEST_OP_DIRTY:
		return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova,
					  cmd->dirty.length,
					  cmd->dirty.page_size,
					  u64_to_user_ptr(cmd->dirty.uptr),
					  cmd->dirty.flags);
	case IOMMU_TEST_OP_TRIGGER_IOPF:
		return iommufd_test_trigger_iopf(ucmd, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

bool iommufd_should_fail(void)
{
	return should_fail(&fail_iommufd, 1);
}

int __init iommufd_test_init(void)
{
	struct platform_device_info pdevinfo = {
		.name = "iommufd_selftest_iommu",
	};
	int rc;

	dbgfs_root =
		fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);

	selftest_iommu_dev = platform_device_register_full(&pdevinfo);
	if (IS_ERR(selftest_iommu_dev)) {
		rc = PTR_ERR(selftest_iommu_dev);
		goto err_dbgfs;
	}

	rc = bus_register(&iommufd_mock_bus_type.bus);
	if (rc)
		goto err_platform;

	rc = iommu_device_sysfs_add(&mock_iommu_device,
				    &selftest_iommu_dev->dev, NULL, "%s",
				    dev_name(&selftest_iommu_dev->dev));
	if (rc)
		goto err_bus;

	rc = iommu_device_register_bus(&mock_iommu_device, &mock_ops,
				       &iommufd_mock_bus_type.bus,
				       &iommufd_mock_bus_type.nb);
	if (rc)
		goto err_sysfs;

	mock_iommu_iopf_queue = iopf_queue_alloc("mock-iopfq");

	return 0;

err_sysfs:
	iommu_device_sysfs_remove(&mock_iommu_device);
err_bus:
	bus_unregister(&iommufd_mock_bus_type.bus);
err_platform:
	platform_device_unregister(selftest_iommu_dev);
err_dbgfs:
	debugfs_remove_recursive(dbgfs_root);
	return rc;
}

void iommufd_test_exit(void)
{
	if (mock_iommu_iopf_queue) {
		iopf_queue_free(mock_iommu_iopf_queue);
		mock_iommu_iopf_queue = NULL;
	}

	iommu_device_sysfs_remove(&mock_iommu_device);
	iommu_device_unregister_bus(&mock_iommu_device,
				    &iommufd_mock_bus_type.bus,
				    &iommufd_mock_bus_type.nb);
	bus_unregister(&iommufd_mock_bus_type.bus);
	platform_device_unregister(selftest_iommu_dev);
	debugfs_remove_recursive(dbgfs_root);
}