// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
 *
 * Kernel side components to support tools/testing/selftests/iommu
 */
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/fault-inject.h>
#include <linux/file.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "io_pagetable.h"
#include "iommufd_private.h"
#include "iommufd_test.h"

static DECLARE_FAULT_ATTR(fail_iommufd);
static struct dentry *dbgfs_root;
static struct platform_device *selftest_iommu_dev;
static const struct iommu_ops mock_ops;
static struct iommu_domain_ops domain_nested_ops;

size_t iommufd_test_memory_limit = 65536;

struct mock_bus_type {
	struct bus_type bus;
	struct notifier_block nb;
};

static struct mock_bus_type iommufd_mock_bus_type = {
	.bus = {
		.name = "iommufd_mock",
	},
};

static DEFINE_IDA(mock_dev_ida);

enum {
	MOCK_DIRTY_TRACK = 1,
	MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
	MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,

	/*
	 * Like a real page table, alignment requires the low bits of the
	 * address to be zero. xarray also requires the high bit to be zero,
	 * so we store the pfns shifted. The upper bits are used for metadata.
	 */
	MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,

	_MOCK_PFN_START = MOCK_PFN_MASK + 1,
	MOCK_PFN_START_IOVA = _MOCK_PFN_START,
	MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
	MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
	MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};
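
/*
 * For illustration only, assuming a 4 KiB PAGE_SIZE (so MOCK_IO_PAGE_SIZE is
 * 2 KiB): the first page of a map of paddr 0x10000 is stored as
 *
 *	xa_mk_value((0x10000 / 0x800) | MOCK_PFN_START_IOVA)
 *
 * i.e. pfn 0x20 with the START metadata bit set above it. The pfn is
 * recovered with (value & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE, which is
 * exactly what mock_domain_iova_to_phys() does below.
 */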

/*
 * Syzkaller has trouble randomizing the correct iova to use since it is linked
 * to the map ioctl's output, and it has no idea about that. So, simplify
 * things. In syzkaller mode the 64 bit IOVA is converted into an nth area and
 * offset value. This has a much smaller randomization space and syzkaller can
 * hit it.
 */
static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
						  u64 *iova)
{
	struct syz_layout {
		__u32 nth_area;
		__u32 offset;
	};
	struct syz_layout *syz = (void *)iova;
	unsigned int nth = syz->nth_area;
	struct iopt_area *area;

	down_read(&iopt->iova_rwsem);
	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
		if (nth == 0) {
			up_read(&iopt->iova_rwsem);
			return iopt_area_iova(area) + syz->offset;
		}
		nth--;
	}
	up_read(&iopt->iova_rwsem);

	return 0;
}
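
/*
 * A minimal sketch of the conversion above, assuming an IOAS whose first two
 * areas start at 0x10000 and 0x40000: a syzkaller-provided "iova" overlaying
 * { .nth_area = 1, .offset = 0x30 } resolves to 0x40000 + 0x30. The two u32
 * fields simply alias the u64 passed in, so the randomization space becomes
 * an area index plus a byte offset rather than the full 64-bit IOVA space.
 */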

static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
						u64 *iova)
{
	unsigned long ret;

	mutex_lock(&access->ioas_lock);
	if (!access->ioas) {
		mutex_unlock(&access->ioas_lock);
		return 0;
	}
	ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
	mutex_unlock(&access->ioas_lock);
	return ret;
}

void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags)
{
	struct iommufd_ioas *ioas;

	if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
		return;
	*flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;

	ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
	if (IS_ERR(ioas))
		return;
	*iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
	iommufd_put_object(ucmd->ictx, &ioas->obj);
}

struct mock_iommu_domain {
	unsigned long flags;
	struct iommu_domain domain;
	struct xarray pfns;
};

static inline struct mock_iommu_domain *
to_mock_domain(struct iommu_domain *domain)
{
	return container_of(domain, struct mock_iommu_domain, domain);
}

struct mock_iommu_domain_nested {
	struct iommu_domain domain;
	struct mock_viommu *mock_viommu;
	struct mock_iommu_domain *parent;
	u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM];
};

static inline struct mock_iommu_domain_nested *
to_mock_nested(struct iommu_domain *domain)
{
	return container_of(domain, struct mock_iommu_domain_nested, domain);
}

struct mock_viommu {
	struct iommufd_viommu core;
	struct mock_iommu_domain *s2_parent;
};

static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
{
	return container_of(viommu, struct mock_viommu, core);
}

enum selftest_obj_type {
	TYPE_IDEV,
};

struct mock_dev {
	struct device dev;
	unsigned long flags;
	int id;
	u32 cache[MOCK_DEV_CACHE_NUM];
};

static inline struct mock_dev *to_mock_dev(struct device *dev)
{
	return container_of(dev, struct mock_dev, dev);
}

struct selftest_obj {
	struct iommufd_object obj;
	enum selftest_obj_type type;

	union {
		struct {
			struct iommufd_device *idev;
			struct iommufd_ctx *ictx;
			struct mock_dev *mock_dev;
		} idev;
	};
};

static inline struct selftest_obj *to_selftest_obj(struct iommufd_object *obj)
{
	return container_of(obj, struct selftest_obj, obj);
}

static int mock_domain_nop_attach(struct iommu_domain *domain,
				  struct device *dev)
{
	struct mock_dev *mdev = to_mock_dev(dev);

	if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
		return -EINVAL;

	return 0;
}

static const struct iommu_domain_ops mock_blocking_ops = {
	.attach_dev = mock_domain_nop_attach,
};

static struct iommu_domain mock_blocking_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &mock_blocking_ops,
};

static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type)
{
	struct iommu_test_hw_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	info->test_reg = IOMMU_HW_INFO_SELFTEST_REGVAL;
	*length = sizeof(*info);
	*type = IOMMU_HW_INFO_TYPE_SELFTEST;

	return info;
}

static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
					  bool enable)
{
	struct mock_iommu_domain *mock = to_mock_domain(domain);
	unsigned long flags = mock->flags;

	if (enable && !domain->dirty_ops)
		return -EINVAL;

	/* No change? */
	if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK)))
		return 0;

	flags = (enable ? flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);

	mock->flags = flags;
	return 0;
}

static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
				      unsigned long iova, size_t page_size,
				      unsigned long flags)
{
	unsigned long cur, end = iova + page_size - 1;
	bool dirty = false;
	void *ent, *old;

	for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
		ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
		if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
			continue;

		dirty = true;
		/* Clear dirty */
		if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
			unsigned long val;

			val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
			old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
				       xa_mk_value(val), GFP_KERNEL);
			WARN_ON_ONCE(ent != old);
		}
	}

	return dirty;
}

static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
					    unsigned long iova, size_t size,
					    unsigned long flags,
					    struct iommu_dirty_bitmap *dirty)
{
	struct mock_iommu_domain *mock = to_mock_domain(domain);
	unsigned long end = iova + size;
	void *ent;

	if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
		return -EINVAL;

	do {
		unsigned long pgsize = MOCK_IO_PAGE_SIZE;
		unsigned long head;

		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
		if (!ent) {
			iova += pgsize;
			continue;
		}

		if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
			pgsize = MOCK_HUGE_PAGE_SIZE;
		head = iova & ~(pgsize - 1);

		/* Clear dirty */
		if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
			iommu_dirty_bitmap_record(dirty, iova, pgsize);
		iova += pgsize;
	} while (iova < end);

	return 0;
}

static const struct iommu_dirty_ops dirty_ops = {
	.set_dirty_tracking = mock_domain_set_dirty_tracking,
	.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
};
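
/*
 * A sketch of how the selftests exercise these two ops (not additional driver
 * behaviour): userspace enables tracking through set_dirty_tracking(), marks
 * pfns dirty with IOMMU_TEST_OP_DIRTY (iommufd_test_dirty() below sets
 * MOCK_PFN_DIRTY_IOVA on the chosen entries), and then harvests the bitmap
 * through read_and_clear_dirty(), which clears each bit again unless
 * IOMMU_DIRTY_NO_CLEAR was requested.
 */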

static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
{
	struct mock_dev *mdev = to_mock_dev(dev);
	struct mock_iommu_domain *mock;

	mock = kzalloc(sizeof(*mock), GFP_KERNEL);
	if (!mock)
		return NULL;
	mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
	mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
	mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
	if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
		mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
	mock->domain.ops = mock_ops.default_domain_ops;
	mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
	xa_init(&mock->pfns);
	return &mock->domain;
}

static struct mock_iommu_domain_nested *
__mock_domain_alloc_nested(const struct iommu_user_data *user_data)
{
	struct mock_iommu_domain_nested *mock_nested;
	struct iommu_hwpt_selftest user_cfg;
	int rc, i;

	if (user_data->type != IOMMU_HWPT_DATA_SELFTEST)
		return ERR_PTR(-EOPNOTSUPP);

	rc = iommu_copy_struct_from_user(&user_cfg, user_data,
					 IOMMU_HWPT_DATA_SELFTEST, iotlb);
	if (rc)
		return ERR_PTR(rc);

	mock_nested = kzalloc(sizeof(*mock_nested), GFP_KERNEL);
	if (!mock_nested)
		return ERR_PTR(-ENOMEM);
	mock_nested->domain.ops = &domain_nested_ops;
	mock_nested->domain.type = IOMMU_DOMAIN_NESTED;
	for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)
		mock_nested->iotlb[i] = user_cfg.iotlb;
	return mock_nested;
}

static struct iommu_domain *
mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
			 u32 flags, const struct iommu_user_data *user_data)
{
	struct mock_iommu_domain_nested *mock_nested;
	struct mock_iommu_domain *mock_parent;

	if (flags & ~IOMMU_HWPT_FAULT_ID_VALID)
		return ERR_PTR(-EOPNOTSUPP);
	if (!parent || parent->ops != mock_ops.default_domain_ops)
		return ERR_PTR(-EINVAL);

	mock_parent = to_mock_domain(parent);
	if (!mock_parent)
		return ERR_PTR(-EINVAL);

	mock_nested = __mock_domain_alloc_nested(user_data);
	if (IS_ERR(mock_nested))
		return ERR_CAST(mock_nested);
	mock_nested->parent = mock_parent;
	return &mock_nested->domain;
}

static struct iommu_domain *
mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
			       const struct iommu_user_data *user_data)
{
	bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
	const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
				 IOMMU_HWPT_ALLOC_NEST_PARENT;
	bool no_dirty_ops = to_mock_dev(dev)->flags &
			    MOCK_FLAGS_DEVICE_NO_DIRTY;
	struct iommu_domain *domain;

	if (user_data)
		return ERR_PTR(-EOPNOTSUPP);
	if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
		return ERR_PTR(-EOPNOTSUPP);

	domain = mock_domain_alloc_paging(dev);
	if (!domain)
		return ERR_PTR(-ENOMEM);
	if (has_dirty_flag)
		domain->dirty_ops = &dirty_ops;
	return domain;
}

static void mock_domain_free(struct iommu_domain *domain)
{
	struct mock_iommu_domain *mock = to_mock_domain(domain);

	WARN_ON(!xa_empty(&mock->pfns));
	kfree(mock);
}

static int mock_domain_map_pages(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t paddr,
				 size_t pgsize, size_t pgcount, int prot,
				 gfp_t gfp, size_t *mapped)
{
	struct mock_iommu_domain *mock = to_mock_domain(domain);
	unsigned long flags = MOCK_PFN_START_IOVA;
	unsigned long start_iova = iova;

	/*
	 * xarray does not reliably work with fault injection because it does a
	 * retry allocation, so put our own failure point.
	 */
	if (iommufd_should_fail())
		return -ENOENT;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			void *old;

			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
				flags = MOCK_PFN_LAST_IOVA;
			if (pgsize != MOCK_IO_PAGE_SIZE) {
				flags |= MOCK_PFN_HUGE_IOVA;
			}
			old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
				       xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
						   flags),
				       gfp);
			if (xa_is_err(old)) {
				for (; start_iova != iova;
				     start_iova += MOCK_IO_PAGE_SIZE)
					xa_erase(&mock->pfns,
						 start_iova /
							 MOCK_IO_PAGE_SIZE);
				return xa_err(old);
			}
			WARN_ON(old);
			iova += MOCK_IO_PAGE_SIZE;
			paddr += MOCK_IO_PAGE_SIZE;
			*mapped += MOCK_IO_PAGE_SIZE;
			flags = 0;
		}
	}
	return 0;
}
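
/*
 * For example, with MOCK_FLAGS_DEVICE_HUGE_IOVA set a single call with
 * pgsize == MOCK_HUGE_PAGE_SIZE and pgcount == 1 creates 512 xarray entries:
 * the first one carries MOCK_PFN_START_IOVA, the final one
 * MOCK_PFN_LAST_IOVA, and every entry carries MOCK_PFN_HUGE_IOVA. Those
 * metadata bits are what mock_domain_unmap_pages() checks when it validates
 * unmap boundaries and what mock_domain_read_and_clear_dirty() uses to widen
 * its reporting granularity.
 */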

static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
				      unsigned long iova, size_t pgsize,
				      size_t pgcount,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	struct mock_iommu_domain *mock = to_mock_domain(domain);
	bool first = true;
	size_t ret = 0;
	void *ent;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);

	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);

			/*
			 * iommufd generates unmaps that must be a strict
			 * superset of the maps performed, so every
			 * starting/ending IOVA should have been an iova
			 * passed to map.
			 *
			 * This simple logic doesn't work when the HUGE_PAGE is
			 * turned on since the core code will automatically
			 * switch between the two page sizes creating a break in
			 * the unmap calls. The break can land in the middle of
			 * a huge page.
			 */
			if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
				if (first) {
					WARN_ON(ent && !(xa_to_value(ent) &
							 MOCK_PFN_START_IOVA));
					first = false;
				}
				if (pgcount == 1 &&
				    cur + MOCK_IO_PAGE_SIZE == pgsize)
					WARN_ON(ent && !(xa_to_value(ent) &
							 MOCK_PFN_LAST_IOVA));
			}

			iova += MOCK_IO_PAGE_SIZE;
			ret += MOCK_IO_PAGE_SIZE;
		}
	}
	return ret;
}

static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct mock_iommu_domain *mock = to_mock_domain(domain);
	void *ent;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
	WARN_ON(!ent);
	return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
}

static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
{
	struct mock_dev *mdev = to_mock_dev(dev);

	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_DIRTY_TRACKING:
		return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY);
	default:
		break;
	}

	return false;
}

static struct iopf_queue *mock_iommu_iopf_queue;

static struct mock_iommu_device {
	struct iommu_device iommu_dev;
	struct completion complete;
	refcount_t users;
} mock_iommu;

static struct iommu_device *mock_probe_device(struct device *dev)
{
	if (dev->bus != &iommufd_mock_bus_type.bus)
		return ERR_PTR(-ENODEV);
	return &mock_iommu.iommu_dev;
}

static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt,
				      struct iommu_page_response *msg)
{
}

static int mock_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
{
	if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
		return -ENODEV;

	return iopf_queue_add_device(mock_iommu_iopf_queue, dev);
}

static int mock_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
{
	if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
		return -ENODEV;

	iopf_queue_remove_device(mock_iommu_iopf_queue, dev);

	return 0;
}

static void mock_viommu_destroy(struct iommufd_viommu *viommu)
{
	struct mock_iommu_device *mock_iommu = container_of(
		viommu->iommu_dev, struct mock_iommu_device, iommu_dev);

	if (refcount_dec_and_test(&mock_iommu->users))
		complete(&mock_iommu->complete);

	/* iommufd core frees mock_viommu and viommu */
}

static struct iommu_domain *
mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
				const struct iommu_user_data *user_data)
{
	struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
	struct mock_iommu_domain_nested *mock_nested;

	if (flags & ~IOMMU_HWPT_FAULT_ID_VALID)
		return ERR_PTR(-EOPNOTSUPP);

	mock_nested = __mock_domain_alloc_nested(user_data);
	if (IS_ERR(mock_nested))
		return ERR_CAST(mock_nested);
	mock_nested->mock_viommu = mock_viommu;
	mock_nested->parent = mock_viommu->s2_parent;
	return &mock_nested->domain;
}

static int mock_viommu_cache_invalidate(struct iommufd_viommu *viommu,
					struct iommu_user_data_array *array)
{
	struct iommu_viommu_invalidate_selftest *cmds;
	struct iommu_viommu_invalidate_selftest *cur;
	struct iommu_viommu_invalidate_selftest *end;
	int rc;

	/* A zero-length array is allowed to validate the array type */
	if (array->entry_num == 0 &&
	    array->type == IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST) {
		array->entry_num = 0;
		return 0;
	}

	cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
	if (!cmds)
		return -ENOMEM;
	cur = cmds;
	end = cmds + array->entry_num;

	static_assert(sizeof(*cmds) == 3 * sizeof(u32));
	rc = iommu_copy_struct_from_full_user_array(
		cmds, sizeof(*cmds), array,
		IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST);
	if (rc)
		goto out;

	while (cur != end) {
		struct mock_dev *mdev;
		struct device *dev;
		int i;

		if (cur->flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
			rc = -EOPNOTSUPP;
			goto out;
		}

		if (cur->cache_id > MOCK_DEV_CACHE_ID_MAX) {
			rc = -EINVAL;
			goto out;
		}

		xa_lock(&viommu->vdevs);
		dev = iommufd_viommu_find_dev(viommu,
					      (unsigned long)cur->vdev_id);
		if (!dev) {
			xa_unlock(&viommu->vdevs);
			rc = -EINVAL;
			goto out;
		}
		mdev = container_of(dev, struct mock_dev, dev);

		if (cur->flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
			/* Invalidate all cache entries and ignore cache_id */
			for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
				mdev->cache[i] = 0;
		} else {
			mdev->cache[cur->cache_id] = 0;
		}
		xa_unlock(&viommu->vdevs);

		cur++;
	}
out:
	array->entry_num = cur - cmds;
	kfree(cmds);
	return rc;
}

static struct iommufd_viommu_ops mock_viommu_ops = {
	.destroy = mock_viommu_destroy,
	.alloc_domain_nested = mock_viommu_alloc_domain_nested,
	.cache_invalidate = mock_viommu_cache_invalidate,
};

static struct iommufd_viommu *mock_viommu_alloc(struct device *dev,
						struct iommu_domain *domain,
						struct iommufd_ctx *ictx,
						unsigned int viommu_type)
{
	struct mock_iommu_device *mock_iommu =
		iommu_get_iommu_dev(dev, struct mock_iommu_device, iommu_dev);
	struct mock_viommu *mock_viommu;

	if (viommu_type != IOMMU_VIOMMU_TYPE_SELFTEST)
		return ERR_PTR(-EOPNOTSUPP);

	mock_viommu = iommufd_viommu_alloc(ictx, struct mock_viommu, core,
					   &mock_viommu_ops);
	if (IS_ERR(mock_viommu))
		return ERR_CAST(mock_viommu);

	refcount_inc(&mock_iommu->users);
	return &mock_viommu->core;
}

static const struct iommu_ops mock_ops = {
	/*
	 * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type()
	 * because it is zero.
	 */
	.default_domain = &mock_blocking_domain,
	.blocked_domain = &mock_blocking_domain,
	.owner = THIS_MODULE,
	.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
	.hw_info = mock_domain_hw_info,
	.domain_alloc_paging = mock_domain_alloc_paging,
	.domain_alloc_paging_flags = mock_domain_alloc_paging_flags,
	.domain_alloc_nested = mock_domain_alloc_nested,
	.capable = mock_domain_capable,
	.device_group = generic_device_group,
	.probe_device = mock_probe_device,
	.page_response = mock_domain_page_response,
	.dev_enable_feat = mock_dev_enable_feat,
	.dev_disable_feat = mock_dev_disable_feat,
	.user_pasid_table = true,
	.viommu_alloc = mock_viommu_alloc,
	.default_domain_ops =
		&(struct iommu_domain_ops){
			.free = mock_domain_free,
			.attach_dev = mock_domain_nop_attach,
			.map_pages = mock_domain_map_pages,
			.unmap_pages = mock_domain_unmap_pages,
			.iova_to_phys = mock_domain_iova_to_phys,
		},
};

static void mock_domain_free_nested(struct iommu_domain *domain)
{
	kfree(to_mock_nested(domain));
}

static int
mock_domain_cache_invalidate_user(struct iommu_domain *domain,
				  struct iommu_user_data_array *array)
{
	struct mock_iommu_domain_nested *mock_nested = to_mock_nested(domain);
	struct iommu_hwpt_invalidate_selftest inv;
	u32 processed = 0;
	int i = 0, j;
	int rc = 0;

	if (array->type != IOMMU_HWPT_INVALIDATE_DATA_SELFTEST) {
		rc = -EINVAL;
		goto out;
	}

	for ( ; i < array->entry_num; i++) {
		rc = iommu_copy_struct_from_user_array(&inv, array,
						       IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
						       i, iotlb_id);
		if (rc)
			break;

		if (inv.flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
			rc = -EOPNOTSUPP;
			break;
		}

		if (inv.iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX) {
			rc = -EINVAL;
			break;
		}

		if (inv.flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
			/* Invalidate all mock iotlb entries and ignore iotlb_id */
			for (j = 0; j < MOCK_NESTED_DOMAIN_IOTLB_NUM; j++)
				mock_nested->iotlb[j] = 0;
		} else {
			mock_nested->iotlb[inv.iotlb_id] = 0;
		}

		processed++;
	}

out:
	array->entry_num = processed;
	return rc;
}

static struct iommu_domain_ops domain_nested_ops = {
	.free = mock_domain_free_nested,
	.attach_dev = mock_domain_nop_attach,
	.cache_invalidate_user = mock_domain_cache_invalidate_user,
};

static inline struct iommufd_hw_pagetable *
__get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, u32 hwpt_type)
{
	struct iommufd_object *obj;

	obj = iommufd_get_object(ucmd->ictx, mockpt_id, hwpt_type);
	if (IS_ERR(obj))
		return ERR_CAST(obj);
	return container_of(obj, struct iommufd_hw_pagetable, obj);
}

static inline struct iommufd_hw_pagetable *
get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
		 struct mock_iommu_domain **mock)
{
	struct iommufd_hw_pagetable *hwpt;

	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_PAGING);
	if (IS_ERR(hwpt))
		return hwpt;
	if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
	    hwpt->domain->ops != mock_ops.default_domain_ops) {
		iommufd_put_object(ucmd->ictx, &hwpt->obj);
		return ERR_PTR(-EINVAL);
	}
	*mock = to_mock_domain(hwpt->domain);
	return hwpt;
}

static inline struct iommufd_hw_pagetable *
get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
			struct mock_iommu_domain_nested **mock_nested)
{
	struct iommufd_hw_pagetable *hwpt;

	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_NESTED);
	if (IS_ERR(hwpt))
		return hwpt;
	if (hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
	    hwpt->domain->ops != &domain_nested_ops) {
		iommufd_put_object(ucmd->ictx, &hwpt->obj);
		return ERR_PTR(-EINVAL);
	}
	*mock_nested = to_mock_nested(hwpt->domain);
	return hwpt;
}

static void mock_dev_release(struct device *dev)
{
	struct mock_dev *mdev = to_mock_dev(dev);

	ida_free(&mock_dev_ida, mdev->id);
	kfree(mdev);
}

static struct mock_dev *mock_dev_create(unsigned long dev_flags)
{
	struct mock_dev *mdev;
	int rc, i;

	if (dev_flags &
	    ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA))
		return ERR_PTR(-EINVAL);

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return ERR_PTR(-ENOMEM);

	device_initialize(&mdev->dev);
	mdev->flags = dev_flags;
	mdev->dev.release = mock_dev_release;
	mdev->dev.bus = &iommufd_mock_bus_type.bus;
	for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
		mdev->cache[i] = IOMMU_TEST_DEV_CACHE_DEFAULT;

	rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
	if (rc < 0)
		goto err_put;
	mdev->id = rc;

	rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
	if (rc)
		goto err_put;

	rc = device_add(&mdev->dev);
	if (rc)
		goto err_put;
	return mdev;

err_put:
	put_device(&mdev->dev);
	return ERR_PTR(rc);
}

static void mock_dev_destroy(struct mock_dev *mdev)
{
	device_unregister(&mdev->dev);
}

bool iommufd_selftest_is_mock_dev(struct device *dev)
{
	return dev->release == mock_dev_release;
}

/* Create an hw_pagetable with the mock domain so we can test the domain ops */
static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
				    struct iommu_test_cmd *cmd)
{
	struct iommufd_device *idev;
	struct selftest_obj *sobj;
	u32 pt_id = cmd->id;
	u32 dev_flags = 0;
	u32 idev_id;
	int rc;

	sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
	if (IS_ERR(sobj))
		return PTR_ERR(sobj);

	sobj->idev.ictx = ucmd->ictx;
	sobj->type = TYPE_IDEV;

	if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS)
		dev_flags = cmd->mock_domain_flags.dev_flags;

	sobj->idev.mock_dev = mock_dev_create(dev_flags);
	if (IS_ERR(sobj->idev.mock_dev)) {
		rc = PTR_ERR(sobj->idev.mock_dev);
		goto out_sobj;
	}

	idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev,
				   &idev_id);
	if (IS_ERR(idev)) {
		rc = PTR_ERR(idev);
		goto out_mdev;
	}
	sobj->idev.idev = idev;

	rc = iommufd_device_attach(idev, &pt_id);
	if (rc)
		goto out_unbind;

	/* Userspace must destroy the device_id to destroy the object */
	cmd->mock_domain.out_hwpt_id = pt_id;
	cmd->mock_domain.out_stdev_id = sobj->obj.id;
	cmd->mock_domain.out_idev_id = idev_id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_detach;

	iommufd_object_finalize(ucmd->ictx, &sobj->obj);
	return 0;

out_detach:
	iommufd_device_detach(idev);
out_unbind:
	iommufd_device_unbind(idev);
out_mdev:
	mock_dev_destroy(sobj->idev.mock_dev);
out_sobj:
	iommufd_object_abort(ucmd->ictx, &sobj->obj);
	return rc;
}
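
/*
 * The unwind order above mirrors the setup order: a failure after
 * iommufd_device_attach() detaches, then unbinds, then destroys the mock
 * device, and finally aborts the still-unfinalized selftest object. Only the
 * fully successful path calls iommufd_object_finalize(), which is what
 * publishes the object ID to userspace.
 */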

/* Replace the mock domain with a manually allocated hw_pagetable */
static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
					    unsigned int device_id, u32 pt_id,
					    struct iommu_test_cmd *cmd)
{
	struct iommufd_object *dev_obj;
	struct selftest_obj *sobj;
	int rc;

	/*
	 * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
	 * it doesn't race with detach, which is not allowed.
	 */
	dev_obj =
		iommufd_get_object(ucmd->ictx, device_id, IOMMUFD_OBJ_SELFTEST);
	if (IS_ERR(dev_obj))
		return PTR_ERR(dev_obj);

	sobj = to_selftest_obj(dev_obj);
	if (sobj->type != TYPE_IDEV) {
		rc = -EINVAL;
		goto out_dev_obj;
	}

	rc = iommufd_device_replace(sobj->idev.idev, &pt_id);
	if (rc)
		goto out_dev_obj;

	cmd->mock_domain_replace.pt_id = pt_id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));

out_dev_obj:
	iommufd_put_object(ucmd->ictx, dev_obj);
	return rc;
}

/* Add an additional reserved IOVA to the IOAS */
static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
				     unsigned int mockpt_id,
				     unsigned long start, size_t length)
{
	struct iommufd_ioas *ioas;
	int rc;

	ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);
	down_write(&ioas->iopt.iova_rwsem);
	rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
	up_write(&ioas->iopt.iova_rwsem);
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

/* Check that every pfn under each iova matches the pfn under a user VA */
static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
				    unsigned int mockpt_id, unsigned long iova,
				    size_t length, void __user *uptr)
{
	struct iommufd_hw_pagetable *hwpt;
	struct mock_iommu_domain *mock;
	uintptr_t end;
	int rc;

	if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
	    (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
		return -EINVAL;

	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	for (; length; length -= MOCK_IO_PAGE_SIZE) {
		struct page *pages[1];
		unsigned long pfn;
		long npages;
		void *ent;

		npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
					     pages);
		if (npages < 0) {
			rc = npages;
			goto out_put;
		}
		if (WARN_ON(npages != 1)) {
			rc = -EFAULT;
			goto out_put;
		}
		pfn = page_to_pfn(pages[0]);
		put_page(pages[0]);

		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
		if (!ent ||
		    (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE !=
			    pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
			rc = -EINVAL;
			goto out_put;
		}
		iova += MOCK_IO_PAGE_SIZE;
		uptr += MOCK_IO_PAGE_SIZE;
	}
	rc = 0;

out_put:
	iommufd_put_object(ucmd->ictx, &hwpt->obj);
	return rc;
}

/* Check that the page ref count matches, to look for missing pin/unpins */
static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
				      void __user *uptr, size_t length,
				      unsigned int refs)
{
	uintptr_t end;

	if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
		return -EINVAL;

	for (; length; length -= PAGE_SIZE) {
		struct page *pages[1];
		long npages;

		npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages);
		if (npages < 0)
			return npages;
		if (WARN_ON(npages != 1))
			return -EFAULT;
		if (!PageCompound(pages[0])) {
			unsigned int count;

			count = page_ref_count(pages[0]);
			if (count / GUP_PIN_COUNTING_BIAS != refs) {
				put_page(pages[0]);
				return -EIO;
			}
		}
		put_page(pages[0]);
		uptr += PAGE_SIZE;
	}
	return 0;
}

static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd,
				       u32 mockpt_id, unsigned int iotlb_id,
				       u32 iotlb)
{
	struct mock_iommu_domain_nested *mock_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc = 0;

	hwpt = get_md_pagetable_nested(ucmd, mockpt_id, &mock_nested);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	mock_nested = to_mock_nested(hwpt->domain);

	if (iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX ||
	    mock_nested->iotlb[iotlb_id] != iotlb)
		rc = -EINVAL;
	iommufd_put_object(ucmd->ictx, &hwpt->obj);
	return rc;
}

static int iommufd_test_dev_check_cache(struct iommufd_ucmd *ucmd, u32 idev_id,
					unsigned int cache_id, u32 cache)
{
	struct iommufd_device *idev;
	struct mock_dev *mdev;
	int rc = 0;

	idev = iommufd_get_device(ucmd, idev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);
	mdev = container_of(idev->dev, struct mock_dev, dev);

	if (cache_id > MOCK_DEV_CACHE_ID_MAX || mdev->cache[cache_id] != cache)
		rc = -EINVAL;
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}

struct selftest_access {
	struct iommufd_access *access;
	struct file *file;
	struct mutex lock;
	struct list_head items;
	unsigned int next_id;
};

struct selftest_access_item {
	struct list_head items_elm;
	unsigned long iova;
	size_t length;
	unsigned int id;
};

static const struct file_operations iommfd_test_staccess_fops;

static struct selftest_access *iommufd_access_get(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADFD);

	if (file->f_op != &iommfd_test_staccess_fops) {
		fput(file);
		return ERR_PTR(-EBADFD);
	}
	return file->private_data;
}

static void iommufd_test_access_unmap(void *data, unsigned long iova,
				      unsigned long length)
{
	unsigned long iova_last = iova + length - 1;
	struct selftest_access *staccess = data;
	struct selftest_access_item *item;
	struct selftest_access_item *tmp;

	mutex_lock(&staccess->lock);
	list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) {
		if (iova > item->iova + item->length - 1 ||
		    iova_last < item->iova)
			continue;
		list_del(&item->items_elm);
		iommufd_access_unpin_pages(staccess->access, item->iova,
					   item->length);
		kfree(item);
	}
	mutex_unlock(&staccess->lock);
}

static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd,
					    unsigned int access_id,
					    unsigned int item_id)
{
	struct selftest_access_item *item;
	struct selftest_access *staccess;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	mutex_lock(&staccess->lock);
	list_for_each_entry(item, &staccess->items, items_elm) {
		if (item->id == item_id) {
			list_del(&item->items_elm);
			iommufd_access_unpin_pages(staccess->access, item->iova,
						   item->length);
			mutex_unlock(&staccess->lock);
			kfree(item);
			fput(staccess->file);
			return 0;
		}
	}
	mutex_unlock(&staccess->lock);
	fput(staccess->file);
	return -ENOENT;
}

static int iommufd_test_staccess_release(struct inode *inode,
					 struct file *filep)
{
	struct selftest_access *staccess = filep->private_data;

	if (staccess->access) {
		iommufd_test_access_unmap(staccess, 0, ULONG_MAX);
		iommufd_access_destroy(staccess->access);
	}
	mutex_destroy(&staccess->lock);
	kfree(staccess);
	return 0;
}

static const struct iommufd_access_ops selftest_access_ops_pin = {
	.needs_pin_pages = 1,
	.unmap = iommufd_test_access_unmap,
};

static const struct iommufd_access_ops selftest_access_ops = {
	.unmap = iommufd_test_access_unmap,
};

static const struct file_operations iommfd_test_staccess_fops = {
	.release = iommufd_test_staccess_release,
};

static struct selftest_access *iommufd_test_alloc_access(void)
{
	struct selftest_access *staccess;
	struct file *filep;

	staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT);
	if (!staccess)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&staccess->items);
	mutex_init(&staccess->lock);

	filep = anon_inode_getfile("[iommufd_test_staccess]",
				   &iommfd_test_staccess_fops, staccess,
				   O_RDWR);
	if (IS_ERR(filep)) {
		kfree(staccess);
		return ERR_CAST(filep);
	}
	staccess->file = filep;
	return staccess;
}

static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
				      unsigned int ioas_id, unsigned int flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access *staccess;
	struct iommufd_access *access;
	u32 id;
	int fdno;
	int rc;

	if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)
		return -EOPNOTSUPP;

	staccess = iommufd_test_alloc_access();
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	fdno = get_unused_fd_flags(O_CLOEXEC);
	if (fdno < 0) {
		rc = -ENOMEM;
		goto out_free_staccess;
	}

	access = iommufd_access_create(
		ucmd->ictx,
		(flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
			&selftest_access_ops_pin :
			&selftest_access_ops,
		staccess, &id);
	if (IS_ERR(access)) {
		rc = PTR_ERR(access);
		goto out_put_fdno;
	}
	rc = iommufd_access_attach(access, ioas_id);
	if (rc)
		goto out_destroy;
	cmd->create_access.out_access_fd = fdno;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_destroy;

	staccess->access = access;
	fd_install(fdno, staccess->file);
	return 0;

out_destroy:
	iommufd_access_destroy(access);
out_put_fdno:
	put_unused_fd(fdno);
out_free_staccess:
	fput(staccess->file);
	return rc;
}

static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd,
					    unsigned int access_id,
					    unsigned int ioas_id)
{
	struct selftest_access *staccess;
	int rc;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	rc = iommufd_access_replace(staccess->access, ioas_id);
	fput(staccess->file);
	return rc;
}

/* Check that the pages in a page array match the pages in the user VA */
static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
				    size_t npages)
{
	for (; npages; npages--) {
		struct page *tmp_pages[1];
		long rc;

		rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages);
		if (rc < 0)
			return rc;
		if (WARN_ON(rc != 1))
			return -EFAULT;
		put_page(tmp_pages[0]);
		if (tmp_pages[0] != *pages)
			return -EBADE;
		pages++;
		uptr += PAGE_SIZE;
	}
	return 0;
}
*ucmd
,
1367 unsigned int access_id
, unsigned long iova
,
1368 size_t length
, void __user
*uptr
,
1371 struct iommu_test_cmd
*cmd
= ucmd
->cmd
;
1372 struct selftest_access_item
*item
;
1373 struct selftest_access
*staccess
;
1374 struct page
**pages
;
1378 /* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
1379 if (length
> 16*1024*1024)
1382 if (flags
& ~(MOCK_FLAGS_ACCESS_WRITE
| MOCK_FLAGS_ACCESS_SYZ
))
1385 staccess
= iommufd_access_get(access_id
);
1386 if (IS_ERR(staccess
))
1387 return PTR_ERR(staccess
);
1389 if (staccess
->access
->ops
!= &selftest_access_ops_pin
) {
1394 if (flags
& MOCK_FLAGS_ACCESS_SYZ
)
1395 iova
= iommufd_test_syz_conv_iova(staccess
->access
,
1396 &cmd
->access_pages
.iova
);
1398 npages
= (ALIGN(iova
+ length
, PAGE_SIZE
) -
1399 ALIGN_DOWN(iova
, PAGE_SIZE
)) /
1401 pages
= kvcalloc(npages
, sizeof(*pages
), GFP_KERNEL_ACCOUNT
);
1408 * Drivers will need to think very carefully about this locking. The
1409 * core code can do multiple unmaps instantaneously after
1410 * iommufd_access_pin_pages() and *all* the unmaps must not return until
1411 * the range is unpinned. This simple implementation puts a global lock
1412 * around the pin, which may not suit drivers that want this to be a
1413 * performance path. drivers that get this wrong will trigger WARN_ON
1414 * races and cause EDEADLOCK failures to userspace.
1416 mutex_lock(&staccess
->lock
);
1417 rc
= iommufd_access_pin_pages(staccess
->access
, iova
, length
, pages
,
1418 flags
& MOCK_FLAGS_ACCESS_WRITE
);
1422 /* For syzkaller allow uptr to be NULL to skip this check */
1424 rc
= iommufd_test_check_pages(
1425 uptr
- (iova
- ALIGN_DOWN(iova
, PAGE_SIZE
)), pages
,
1431 item
= kzalloc(sizeof(*item
), GFP_KERNEL_ACCOUNT
);
1438 item
->length
= length
;
1439 item
->id
= staccess
->next_id
++;
1440 list_add_tail(&item
->items_elm
, &staccess
->items
);
1442 cmd
->access_pages
.out_access_pages_id
= item
->id
;
1443 rc
= iommufd_ucmd_respond(ucmd
, sizeof(*cmd
));
1449 list_del(&item
->items_elm
);
1452 iommufd_access_unpin_pages(staccess
->access
, iova
, length
);
1454 mutex_unlock(&staccess
->lock
);
1457 fput(staccess
->file
);
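
/*
 * A sketch of the resulting bookkeeping: every successful pin is recorded as
 * a selftest_access_item on staccess->items keyed by item->id, and either
 * IOMMU_TEST_OP_DESTROY_ACCESS_PAGES (iommufd_test_access_item_destroy()) or
 * the unmap callback (iommufd_test_access_unmap()) is responsible for
 * unpinning it again, so an item never outlives the range it pinned.
 */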

static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
				  unsigned int access_id, unsigned long iova,
				  size_t length, void __user *ubuf,
				  unsigned int flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access *staccess;
	void *tmp;
	int rc;

	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
	if (length > 16*1024*1024)
		return -ENOMEM;

	if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
		      MOCK_FLAGS_ACCESS_SYZ))
		return -EOPNOTSUPP;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT);
	if (!tmp) {
		rc = -ENOMEM;
		goto out_put;
	}

	if (flags & MOCK_ACCESS_RW_WRITE) {
		if (copy_from_user(tmp, ubuf, length)) {
			rc = -EFAULT;
			goto out_free;
		}
	}

	if (flags & MOCK_FLAGS_ACCESS_SYZ)
		iova = iommufd_test_syz_conv_iova(staccess->access,
						  &cmd->access_rw.iova);

	rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
	if (rc)
		goto out_free;
	if (!(flags & MOCK_ACCESS_RW_WRITE)) {
		if (copy_to_user(ubuf, tmp, length)) {
			rc = -EFAULT;
			goto out_free;
		}
	}

out_free:
	kvfree(tmp);
out_put:
	fput(staccess->file);
	return rc;
}
static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE);
static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH ==
	      __IOMMUFD_ACCESS_RW_SLOW_PATH);

static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
			      unsigned long iova, size_t length,
			      unsigned long page_size, void __user *uptr,
			      u32 flags)
{
	unsigned long i, max;
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct iommufd_hw_pagetable *hwpt;
	struct mock_iommu_domain *mock;
	int rc, count = 0;
	void *tmp;

	if (!page_size || !length || iova % page_size || length % page_size ||
	    !uptr)
		return -EINVAL;

	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	if (!(mock->flags & MOCK_DIRTY_TRACK)) {
		rc = -EINVAL;
		goto out_put;
	}

	max = length / page_size;
	tmp = kvzalloc(DIV_ROUND_UP(max, BITS_PER_LONG) * sizeof(unsigned long),
		       GFP_KERNEL_ACCOUNT);
	if (!tmp) {
		rc = -ENOMEM;
		goto out_put;
	}

	if (copy_from_user(tmp, uptr, DIV_ROUND_UP(max, BITS_PER_BYTE))) {
		rc = -EFAULT;
		goto out_free;
	}

	for (i = 0; i < max; i++) {
		unsigned long cur = iova + i * page_size;
		void *ent, *old;

		if (!test_bit(i, (unsigned long *)tmp))
			continue;

		ent = xa_load(&mock->pfns, cur / page_size);
		if (ent) {
			unsigned long val;

			val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
			old = xa_store(&mock->pfns, cur / page_size,
				       xa_mk_value(val), GFP_KERNEL);
			WARN_ON_ONCE(ent != old);
			count++;
		}
	}

	cmd->dirty.out_nr_dirty = count;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_free:
	kvfree(tmp);
out_put:
	iommufd_put_object(ucmd->ictx, &hwpt->obj);
	return rc;
}

static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd,
				     struct iommu_test_cmd *cmd)
{
	struct iopf_fault event = { };
	struct iommufd_device *idev;

	idev = iommufd_get_device(ucmd, cmd->trigger_iopf.dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	event.fault.prm.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
	if (cmd->trigger_iopf.pasid != IOMMU_NO_PASID)
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
	event.fault.type = IOMMU_FAULT_PAGE_REQ;
	event.fault.prm.addr = cmd->trigger_iopf.addr;
	event.fault.prm.pasid = cmd->trigger_iopf.pasid;
	event.fault.prm.grpid = cmd->trigger_iopf.grpid;
	event.fault.prm.perm = cmd->trigger_iopf.perm;

	iommu_report_device_fault(idev->dev, &event);
	iommufd_put_object(ucmd->ictx, &idev->obj);

	return 0;
}

void iommufd_selftest_destroy(struct iommufd_object *obj)
{
	struct selftest_obj *sobj = to_selftest_obj(obj);

	switch (sobj->type) {
	case TYPE_IDEV:
		iommufd_device_detach(sobj->idev.idev);
		iommufd_device_unbind(sobj->idev.idev);
		mock_dev_destroy(sobj->idev.mock_dev);
		break;
	}
}

int iommufd_test(struct iommufd_ucmd *ucmd)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;

	switch (cmd->op) {
	case IOMMU_TEST_OP_ADD_RESERVED:
		return iommufd_test_add_reserved(ucmd, cmd->id,
						 cmd->add_reserved.start,
						 cmd->add_reserved.length);
	case IOMMU_TEST_OP_MOCK_DOMAIN:
	case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS:
		return iommufd_test_mock_domain(ucmd, cmd);
	case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE:
		return iommufd_test_mock_domain_replace(
			ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd);
	case IOMMU_TEST_OP_MD_CHECK_MAP:
		return iommufd_test_md_check_pa(
			ucmd, cmd->id, cmd->check_map.iova,
			cmd->check_map.length,
			u64_to_user_ptr(cmd->check_map.uptr));
	case IOMMU_TEST_OP_MD_CHECK_REFS:
		return iommufd_test_md_check_refs(
			ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
			cmd->check_refs.length, cmd->check_refs.refs);
	case IOMMU_TEST_OP_MD_CHECK_IOTLB:
		return iommufd_test_md_check_iotlb(ucmd, cmd->id,
						   cmd->check_iotlb.id,
						   cmd->check_iotlb.iotlb);
	case IOMMU_TEST_OP_DEV_CHECK_CACHE:
		return iommufd_test_dev_check_cache(ucmd, cmd->id,
						    cmd->check_dev_cache.id,
						    cmd->check_dev_cache.cache);
	case IOMMU_TEST_OP_CREATE_ACCESS:
		return iommufd_test_create_access(ucmd, cmd->id,
						  cmd->create_access.flags);
	case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS:
		return iommufd_test_access_replace_ioas(
			ucmd, cmd->id, cmd->access_replace_ioas.ioas_id);
	case IOMMU_TEST_OP_ACCESS_PAGES:
		return iommufd_test_access_pages(
			ucmd, cmd->id, cmd->access_pages.iova,
			cmd->access_pages.length,
			u64_to_user_ptr(cmd->access_pages.uptr),
			cmd->access_pages.flags);
	case IOMMU_TEST_OP_ACCESS_RW:
		return iommufd_test_access_rw(
			ucmd, cmd->id, cmd->access_rw.iova,
			cmd->access_rw.length,
			u64_to_user_ptr(cmd->access_rw.uptr),
			cmd->access_rw.flags);
	case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES:
		return iommufd_test_access_item_destroy(
			ucmd, cmd->id, cmd->destroy_access_pages.access_pages_id);
	case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
		/* Protect _batch_init(), cannot be less than elmsz */
		if (cmd->memory_limit.limit <
		    sizeof(unsigned long) + sizeof(u32))
			return -EINVAL;
		iommufd_test_memory_limit = cmd->memory_limit.limit;
		return 0;
	case IOMMU_TEST_OP_DIRTY:
		return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova,
					  cmd->dirty.length,
					  cmd->dirty.page_size,
					  u64_to_user_ptr(cmd->dirty.uptr),
					  cmd->dirty.flags);
	case IOMMU_TEST_OP_TRIGGER_IOPF:
		return iommufd_test_trigger_iopf(ucmd, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
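
/*
 * All of the handlers above are driven from userspace by the selftests under
 * tools/testing/selftests/iommu, which submit a struct iommu_test_cmd through
 * the iommufd ioctl path; cmd->op selects one of the IOMMU_TEST_OP_* cases in
 * this switch.
 */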

bool iommufd_should_fail(void)
{
	return should_fail(&fail_iommufd, 1);
}

int __init iommufd_test_init(void)
{
	struct platform_device_info pdevinfo = {
		.name = "iommufd_selftest_iommu",
	};
	int rc;

	dbgfs_root =
		fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);

	selftest_iommu_dev = platform_device_register_full(&pdevinfo);
	if (IS_ERR(selftest_iommu_dev)) {
		rc = PTR_ERR(selftest_iommu_dev);
		goto err_dbgfs;
	}

	rc = bus_register(&iommufd_mock_bus_type.bus);
	if (rc)
		goto err_platform;

	rc = iommu_device_sysfs_add(&mock_iommu.iommu_dev,
				    &selftest_iommu_dev->dev, NULL, "%s",
				    dev_name(&selftest_iommu_dev->dev));
	if (rc)
		goto err_bus;

	rc = iommu_device_register_bus(&mock_iommu.iommu_dev, &mock_ops,
				       &iommufd_mock_bus_type.bus,
				       &iommufd_mock_bus_type.nb);
	if (rc)
		goto err_sysfs;

	refcount_set(&mock_iommu.users, 1);
	init_completion(&mock_iommu.complete);

	mock_iommu_iopf_queue = iopf_queue_alloc("mock-iopfq");

	return 0;

err_sysfs:
	iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
err_bus:
	bus_unregister(&iommufd_mock_bus_type.bus);
err_platform:
	platform_device_unregister(selftest_iommu_dev);
err_dbgfs:
	debugfs_remove_recursive(dbgfs_root);
	return rc;
}

static void iommufd_test_wait_for_users(void)
{
	if (refcount_dec_and_test(&mock_iommu.users))
		return;
	/*
	 * Time out waiting for the iommu device user count to become 0.
	 *
	 * Note that this is just an example, since the selftest is built into
	 * the iommufd module, i.e. it only unplugs the iommu device when
	 * unloading the module. So, it is expected that this WARN_ON will not
	 * trigger, as long as any iommufd FDs are open.
	 */
	WARN_ON(!wait_for_completion_timeout(&mock_iommu.complete,
					     msecs_to_jiffies(10000)));
}

void iommufd_test_exit(void)
{
	if (mock_iommu_iopf_queue) {
		iopf_queue_free(mock_iommu_iopf_queue);
		mock_iommu_iopf_queue = NULL;
	}

	iommufd_test_wait_for_users();
	iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
	iommu_device_unregister_bus(&mock_iommu.iommu_dev,
				    &iommufd_mock_bus_type.bus,
				    &iommufd_mock_bus_type.nb);
	bus_unregister(&iommufd_mock_bus_type.bus);
	platform_device_unregister(selftest_iommu_dev);
	debugfs_remove_recursive(dbgfs_root);
}