/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
#ifndef __SELFTEST_IOMMUFD_UTILS
#define __SELFTEST_IOMMUFD_UTILS

#include <unistd.h>
#include <stddef.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdint.h>
#include <assert.h>

#include "../kselftest_harness.h"
#include "../../../../drivers/iommu/iommufd/iommufd_test.h"
/* Hack to make assertions more readable */
#define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD
/* Imported from include/asm-generic/bitops/generic-non-atomic.h */
#define BITS_PER_BYTE 8
#define BITS_PER_LONG __BITS_PER_LONG
#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG)
enum {
	IOPT_PAGES_ACCOUNT_NONE = 0,
	IOPT_PAGES_ACCOUNT_USER = 1,
	IOPT_PAGES_ACCOUNT_MM = 2,
};
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
static inline void set_bit(unsigned int nr, unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p |= mask;
}
static inline bool test_bit(unsigned int nr, unsigned long *addr)
{
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)));
}
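
/*
 * Worked example (not from the upstream header): on a 64-bit build,
 * set_bit(67, bm) computes BIT_WORD(67) == 1 and BIT_MASK(67) == 1UL << 3,
 * so it ORs bit 3 of bm[1]; test_bit(67, bm) reads the same bit back:
 *
 *	unsigned long bm[2] = {};
 *
 *	set_bit(67, bm);
 *	assert(test_bit(67, bm));	// bm[1] == 0x8
 */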
static void *buffer;
static unsigned long BUFFER_SIZE;

static void *mfd_buffer;
static int mfd;

static unsigned long PAGE_SIZE;
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
static inline void *memfd_mmap(size_t length, int prot, int flags, int *mfd_p)
{
	int mfd_flags = (flags & MAP_HUGETLB) ? MFD_HUGETLB : 0;
	int mfd = memfd_create("buffer", mfd_flags);

	if (mfd <= 0)
		return MAP_FAILED;
	if (ftruncate(mfd, length))
		return MAP_FAILED;
	*mfd_p = mfd;
	return mmap(0, length, prot, flags, mfd, 0);
}
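
/*
 * Usage sketch (illustrative, not part of the upstream header): back a test
 * buffer with a memfd so the range can later be mapped into an IOAS by file
 * descriptor as well as by virtual address:
 *
 *	int mfd;
 *	void *buf = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, &mfd);
 *
 *	if (buf == MAP_FAILED)
 *		// skip or fail the test
 */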
/*
 * Have the kernel check the refcount on pages. I don't know why a freshly
 * mmap'd anon non-compound page starts out with a ref of 3
 */
#define check_refs(_ptr, _length, _refs)                                      \
	({                                                                    \
		struct iommu_test_cmd test_cmd = {                            \
			.size = sizeof(test_cmd),                             \
			.op = IOMMU_TEST_OP_MD_CHECK_REFS,                    \
			.check_refs = { .length = _length,                    \
					.uptr = (uintptr_t)(_ptr),            \
					.refs = _refs },                      \
		};                                                            \
		ASSERT_EQ(0,                                                  \
			  ioctl(self->fd,                                     \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS), \
				&test_cmd));                                  \
	})
static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id,
				 __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
		.id = ioas_id,
		.mock_domain = {},
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (stdev_id)
		*stdev_id = cmd.mock_domain.out_stdev_id;
	assert(cmd.id == cmd.mock_domain.out_hwpt_id);
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain.out_hwpt_id;
	if (idev_id)
		*idev_id = cmd.mock_domain.out_idev_id;
	return 0;
}
#define test_cmd_mock_domain(ioas_id, stdev_id, hwpt_id, idev_id)       \
	ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, stdev_id, \
					   hwpt_id, idev_id))
#define test_err_mock_domain(_errno, ioas_id, stdev_id, hwpt_id)      \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \
						   stdev_id, hwpt_id, NULL))
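
/*
 * Usage sketch (illustrative only): inside a FIXTURE test, self->fd and
 * self->ioas_id come from the harness setup; the mock domain hands back the
 * selftest device, HWPT and idev IDs for later calls:
 *
 *	__u32 stdev_id, hwpt_id, idev_id;
 *
 *	test_cmd_mock_domain(self->ioas_id, &stdev_id, &hwpt_id, &idev_id);
 */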
static int _test_cmd_mock_domain_flags(int fd, unsigned int ioas_id,
				       __u32 stdev_flags, __u32 *stdev_id,
				       __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS,
		.id = ioas_id,
		.mock_domain_flags = { .dev_flags = stdev_flags },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (stdev_id)
		*stdev_id = cmd.mock_domain_flags.out_stdev_id;
	assert(cmd.id == cmd.mock_domain_flags.out_hwpt_id);
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain_flags.out_hwpt_id;
	if (idev_id)
		*idev_id = cmd.mock_domain_flags.out_idev_id;
	return 0;
}
#define test_cmd_mock_domain_flags(ioas_id, flags, stdev_id, hwpt_id, idev_id) \
	ASSERT_EQ(0, _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,     \
						 stdev_id, hwpt_id, idev_id))
#define test_err_mock_domain_flags(_errno, ioas_id, flags, stdev_id, hwpt_id) \
	EXPECT_ERRNO(_errno,                                                  \
		     _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,    \
						 stdev_id, hwpt_id, NULL))
static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id,
					 __u32 *hwpt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE,
		.id = stdev_id,
		.mock_domain_replace = {
			.pt_id = pt_id,
		},
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain_replace.pt_id;
	return 0;
}

#define test_cmd_mock_domain_replace(stdev_id, pt_id)                         \
	ASSERT_EQ(0, _test_cmd_mock_domain_replace(self->fd, stdev_id, pt_id, \
						   NULL))
#define test_err_mock_domain_replace(_errno, stdev_id, pt_id)                  \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \
							   pt_id, NULL))
static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id,
				__u32 ft_id, __u32 flags, __u32 *hwpt_id,
				__u32 data_type, void *data, size_t data_len)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.dev_id = device_id,
		.pt_id = pt_id,
		.data_type = data_type,
		.data_len = data_len,
		.data_uptr = (uint64_t)data,
		.fault_id = ft_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_ALLOC, &cmd);
	if (ret)
		return ret;
	if (hwpt_id)
		*hwpt_id = cmd.out_hwpt_id;
	return 0;
}

#define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id)                   \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, IOMMU_HWPT_DATA_NONE, NULL,  \
					  0))
#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id)      \
	EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc(                         \
				     self->fd, device_id, pt_id, 0, flags, \
				     hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, 0))

#define test_cmd_hwpt_alloc_nested(device_id, pt_id, flags, hwpt_id,            \
				   data_type, data, data_len)                   \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, data_type, data, data_len))
#define test_err_hwpt_alloc_nested(_errno, device_id, pt_id, flags, hwpt_id,      \
				   data_type, data, data_len)                     \
	EXPECT_ERRNO(_errno,                                                      \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags,   \
					  hwpt_id, data_type, data, data_len))

#define test_cmd_hwpt_alloc_iopf(device_id, pt_id, fault_id, flags, hwpt_id,     \
				 data_type, data, data_len)                      \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id,  \
					  flags, hwpt_id, data_type, data,       \
					  data_len))
#define test_err_hwpt_alloc_iopf(_errno, device_id, pt_id, fault_id, flags,      \
				 hwpt_id, data_type, data, data_len)             \
	EXPECT_ERRNO(_errno,                                                     \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id,  \
					  flags, hwpt_id, data_type, data,       \
					  data_len))
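
/*
 * Usage sketch (illustrative only): allocate a nesting-parent HWPT for a
 * device, then a nested HWPT on top of it using the selftest data type:
 *
 *	__u32 hwpt_id, nested_hwpt_id;
 *	struct iommu_hwpt_selftest data = { .iotlb = IOMMU_TEST_IOTLB_DEFAULT };
 *
 *	test_cmd_hwpt_alloc(idev_id, self->ioas_id,
 *			    IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id);
 *	test_cmd_hwpt_alloc_nested(idev_id, hwpt_id, 0, &nested_hwpt_id,
 *				   IOMMU_HWPT_DATA_SELFTEST, &data,
 *				   sizeof(data));
 */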
#define test_cmd_hwpt_check_iotlb(hwpt_id, iotlb_id, expected)                 \
	({                                                                     \
		struct iommu_test_cmd test_cmd = {                             \
			.size = sizeof(test_cmd),                              \
			.op = IOMMU_TEST_OP_MD_CHECK_IOTLB,                    \
			.id = hwpt_id,                                         \
			.check_iotlb = {                                       \
				.id = iotlb_id,                                \
				.iotlb = expected,                             \
			},                                                     \
		};                                                             \
		ASSERT_EQ(0,                                                   \
			  ioctl(self->fd,                                      \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_IOTLB), \
				&test_cmd));                                   \
	})

#define test_cmd_hwpt_check_iotlb_all(hwpt_id, expected)                 \
	({                                                               \
		int i;                                                   \
		for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)       \
			test_cmd_hwpt_check_iotlb(hwpt_id, i, expected); \
	})
#define test_cmd_dev_check_cache(device_id, cache_id, expected)            \
	({                                                                 \
		struct iommu_test_cmd test_cmd = {                         \
			.size = sizeof(test_cmd),                          \
			.op = IOMMU_TEST_OP_DEV_CHECK_CACHE,               \
			.id = device_id,                                   \
			.check_dev_cache = {                               \
				.id = cache_id,                            \
				.cache = expected,                         \
			},                                                 \
		};                                                         \
		ASSERT_EQ(0, ioctl(self->fd,                               \
				   _IOMMU_TEST_CMD(                        \
					   IOMMU_TEST_OP_DEV_CHECK_CACHE), \
				   &test_cmd));                            \
	})

#define test_cmd_dev_check_cache_all(device_id, expected)                \
	({                                                               \
		int c;                                                   \
		for (c = 0; c < MOCK_DEV_CACHE_NUM; c++)                 \
			test_cmd_dev_check_cache(device_id, c, expected); \
	})
static int _test_cmd_hwpt_invalidate(int fd, __u32 hwpt_id, void *reqs,
				     uint32_t data_type, uint32_t lreq,
				     uint32_t *nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.data_type = data_type,
		.data_uptr = (uint64_t)reqs,
		.entry_len = lreq,
		.entry_num = *nreqs,
	};
	int rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
	*nreqs = cmd.entry_num;
	return rc;
}

#define test_cmd_hwpt_invalidate(hwpt_id, reqs, data_type, lreq, nreqs)       \
	({                                                                    \
		ASSERT_EQ(0,                                                  \
			  _test_cmd_hwpt_invalidate(self->fd, hwpt_id, reqs,  \
						    data_type, lreq, nreqs)); \
	})
#define test_err_hwpt_invalidate(_errno, hwpt_id, reqs, data_type, lreq, \
				 nreqs)                                  \
	({                                                               \
		EXPECT_ERRNO(_errno, _test_cmd_hwpt_invalidate(          \
					     self->fd, hwpt_id, reqs,    \
					     data_type, lreq, nreqs));   \
	})
static int _test_cmd_viommu_invalidate(int fd, __u32 viommu_id, void *reqs,
				       uint32_t data_type, uint32_t lreq,
				       uint32_t *nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = viommu_id,
		.data_type = data_type,
		.data_uptr = (uint64_t)reqs,
		.entry_len = lreq,
		.entry_num = *nreqs,
	};
	int rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
	*nreqs = cmd.entry_num;
	return rc;
}

#define test_cmd_viommu_invalidate(viommu, reqs, lreq, nreqs)                  \
	({                                                                     \
		ASSERT_EQ(0,                                                   \
			  _test_cmd_viommu_invalidate(self->fd, viommu, reqs,  \
					IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST, \
					lreq, nreqs));                         \
	})
#define test_err_viommu_invalidate(_errno, viommu_id, reqs, data_type, lreq, \
				   nreqs)                                    \
	({                                                                   \
		EXPECT_ERRNO(_errno, _test_cmd_viommu_invalidate(            \
					     self->fd, viommu_id, reqs,      \
					     data_type, lreq, nreqs));       \
	})
static int _test_cmd_access_replace_ioas(int fd, __u32 access_id,
					 unsigned int ioas_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_ACCESS_REPLACE_IOAS,
		.id = access_id,
		.access_replace_ioas = { .ioas_id = ioas_id },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	return 0;
}
#define test_cmd_access_replace_ioas(access_id, ioas_id) \
	ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id))
static int _test_cmd_set_dirty_tracking(int fd, __u32 hwpt_id, bool enabled)
{
	struct iommu_hwpt_set_dirty_tracking cmd = {
		.size = sizeof(cmd),
		.flags = enabled ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0,
		.hwpt_id = hwpt_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd);
	if (ret)
		return ret;
	return 0;
}
#define test_cmd_set_dirty_tracking(hwpt_id, enabled) \
	ASSERT_EQ(0, _test_cmd_set_dirty_tracking(self->fd, hwpt_id, enabled))
static int _test_cmd_get_dirty_bitmap(int fd, __u32 hwpt_id, size_t length,
				      __u64 iova, size_t page_size,
				      __u64 *bitmap, __u32 flags)
{
	struct iommu_hwpt_get_dirty_bitmap cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.flags = flags,
		.iova = iova,
		.length = length,
		.page_size = page_size,
		.data = (uintptr_t)bitmap,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd);
	if (ret)
		return ret;
	return 0;
}

#define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size,     \
				  bitmap, flags)                            \
	ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova,  \
						page_size, bitmap, flags))
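
/*
 * Usage sketch (illustrative only): dirty tracking is enabled on the HWPT
 * before reading the per-IOVA dirty bitmap; one bitmap bit covers one
 * page_size unit of IOVA:
 *
 *	test_cmd_set_dirty_tracking(hwpt_id, true);
 *	test_cmd_get_dirty_bitmap(self->fd, hwpt_id, BUFFER_SIZE, iova,
 *				  PAGE_SIZE, bitmap, 0);
 */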
static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length,
					   __u64 iova, size_t page_size,
					   __u64 *bitmap, __u64 *dirty)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DIRTY,
		.id = hwpt_id,
		.dirty = {
			.iova = iova,
			.length = length,
			.page_size = page_size,
			.uptr = (uintptr_t)bitmap,
		},
	};
	int ret;

	ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY), &cmd);
	if (ret)
		return ret;
	if (dirty)
		*dirty = cmd.dirty.out_nr_dirty;
	return 0;
}

#define test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, \
				       bitmap, nr)                           \
	ASSERT_EQ(0,                                                         \
		  _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \
						  page_size, bitmap, nr))
static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
				    __u64 iova, size_t page_size,
				    size_t pte_page_size, __u64 *bitmap,
				    __u64 nbits, __u32 flags,
				    struct __test_metadata *_metadata)
{
	unsigned long npte = pte_page_size / page_size, pteset = 2 * npte;
	unsigned long j, i, nr = nbits / pteset ?: 1;
	unsigned long bitmap_size = DIV_ROUND_UP(nbits, BITS_PER_BYTE);
	__u64 out_dirty = 0;

	/* Mark all even bits as dirty in the mock domain */
	memset(bitmap, 0, bitmap_size);
	for (i = 0; i < nbits; i += pteset)
		set_bit(i, (unsigned long *)bitmap);

	test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size,
				       bitmap, &out_dirty);
	ASSERT_EQ(nr, out_dirty);

	/* Expect all even bits as dirty in the user bitmap */
	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);
	/* Beware ASSERT_EQ() is two statements -- braces are not redundant! */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(j < npte,
				  test_bit(i + j, (unsigned long *)bitmap));
		}
		ASSERT_EQ(!(i % pteset), test_bit(i, (unsigned long *)bitmap));
	}

	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);

	/* It was read already -- expect all zeroes */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(
				!!(flags &
				   IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR),
				test_bit(i + j, (unsigned long *)bitmap));
		}
	}

	return 0;
}
#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, pte_size,   \
				bitmap, bitmap_size, flags, _metadata)        \
	ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \
					      page_size, pte_size, bitmap,     \
					      bitmap_size, flags, _metadata))
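
/*
 * Worked example (illustrative only): with 256k of IOVA, page_size == 4k and
 * pte_size == 4k, there are 64 bitmap bits, so the caller needs
 * DIV_ROUND_UP(64, BITS_PER_BYTE) == 8 bytes of bitmap storage:
 *
 *	test_mock_dirty_bitmaps(hwpt_id, 256 * 1024, iova, PAGE_SIZE,
 *				PAGE_SIZE, bitmap, 64, 0, _metadata);
 */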
static int _test_cmd_create_access(int fd, unsigned int ioas_id,
				   __u32 *access_id, unsigned int flags)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_CREATE_ACCESS,
		.id = ioas_id,
		.create_access = { .flags = flags },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	*access_id = cmd.create_access.out_access_fd;
	return 0;
}
#define test_cmd_create_access(ioas_id, access_id, flags)                  \
	ASSERT_EQ(0, _test_cmd_create_access(self->fd, ioas_id, access_id, \
					     flags))

static int _test_cmd_destroy_access(unsigned int access_id)
{
	return close(access_id);
}
#define test_cmd_destroy_access(access_id) \
	ASSERT_EQ(0, _test_cmd_destroy_access(access_id))
static int _test_cmd_destroy_access_pages(int fd, unsigned int access_id,
					  unsigned int access_pages_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DESTROY_ACCESS_PAGES,
		.id = access_id,
		.destroy_access_pages = { .access_pages_id = access_pages_id },
	};
	return ioctl(fd, IOMMU_TEST_CMD, &cmd);
}
#define test_cmd_destroy_access_pages(access_id, access_pages_id)        \
	ASSERT_EQ(0, _test_cmd_destroy_access_pages(self->fd, access_id, \
						    access_pages_id))
#define test_err_destroy_access_pages(_errno, access_id, access_pages_id) \
	EXPECT_ERRNO(_errno, _test_cmd_destroy_access_pages(              \
				     self->fd, access_id, access_pages_id))
static int _test_ioctl_destroy(int fd, unsigned int id)
{
	struct iommu_destroy cmd = {
		.size = sizeof(cmd),
		.id = id,
	};
	return ioctl(fd, IOMMU_DESTROY, &cmd);
}
#define test_ioctl_destroy(id) ASSERT_EQ(0, _test_ioctl_destroy(self->fd, id))
static int _test_ioctl_ioas_alloc(int fd, __u32 *id)
{
	struct iommu_ioas_alloc cmd = {
		.size = sizeof(cmd),
	};
	int ret;

	ret = ioctl(fd, IOMMU_IOAS_ALLOC, &cmd);
	if (ret)
		return ret;
	*id = cmd.out_ioas_id;
	return 0;
}
#define test_ioctl_ioas_alloc(id)                                   \
	({                                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_alloc(self->fd, id)); \
		ASSERT_NE(0, *(id));                                \
	})
static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer,
				size_t length, __u64 *iova, unsigned int flags)
{
	struct iommu_ioas_map cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.user_va = (uintptr_t)buffer,
		.length = length,
	};
	int ret;

	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
		cmd.iova = *iova;

	ret = ioctl(fd, IOMMU_IOAS_MAP, &cmd);
	*iova = cmd.iova;
	return ret;
}
#define test_ioctl_ioas_map(buffer, length, iova_p)                        \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

#define test_err_ioctl_ioas_map(_errno, buffer, length, iova_p)            \
	EXPECT_ERRNO(_errno,                                               \
		     _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

#define test_ioctl_ioas_map_id(ioas_id, buffer, length, iova_p)               \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, ioas_id, buffer, length,  \
					  iova_p,                             \
					  IOMMU_IOAS_MAP_WRITEABLE |          \
						  IOMMU_IOAS_MAP_READABLE))

#define test_ioctl_ioas_map_fixed(buffer, length, iova)                       \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})

#define test_ioctl_ioas_map_fixed_id(ioas_id, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0,                                                  \
			  _test_ioctl_ioas_map(                               \
				  self->fd, ioas_id, buffer, length, &__iova, \
				  IOMMU_IOAS_MAP_FIXED_IOVA |                 \
					  IOMMU_IOAS_MAP_WRITEABLE |          \
					  IOMMU_IOAS_MAP_READABLE));          \
	})

#define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		EXPECT_ERRNO(_errno,                                          \
			     _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})
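
/*
 * Usage sketch (illustrative only; fixed_iova stands for any legal IOVA in
 * the IOAS aperture): a mapping at an allocator-chosen IOVA versus one
 * pinned to a caller-chosen IOVA:
 *
 *	__u64 iova;
 *
 *	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
 *	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, fixed_iova);
 *	test_ioctl_ioas_unmap(fixed_iova, PAGE_SIZE);
 */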
static int _test_ioctl_ioas_unmap(int fd, unsigned int ioas_id, uint64_t iova,
				  size_t length, uint64_t *out_len)
{
	struct iommu_ioas_unmap cmd = {
		.size = sizeof(cmd),
		.ioas_id = ioas_id,
		.iova = iova,
		.length = length,
	};
	int ret;

	ret = ioctl(fd, IOMMU_IOAS_UNMAP, &cmd);
	if (out_len)
		*out_len = cmd.length;
	return ret;
}
#define test_ioctl_ioas_unmap(iova, length)                                \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, iova, \
					    length, NULL))

#define test_ioctl_ioas_unmap_id(ioas_id, iova, length)                      \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, length, \
					    NULL))

#define test_err_ioctl_ioas_unmap(_errno, iova, length)                      \
	EXPECT_ERRNO(_errno, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, \
						    iova, length, NULL))
static int _test_ioctl_ioas_map_file(int fd, unsigned int ioas_id, int mfd,
				     size_t start, size_t length, __u64 *iova,
				     unsigned int flags)
{
	struct iommu_ioas_map_file cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.fd = mfd,
		.start = start,
		.length = length,
	};
	int ret;

	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
		cmd.iova = *iova;

	ret = ioctl(fd, IOMMU_IOAS_MAP_FILE, &cmd);
	*iova = cmd.iova;
	return ret;
}

#define test_ioctl_ioas_map_file(mfd, start, length, iova_p)                   \
	ASSERT_EQ(0,                                                           \
		  _test_ioctl_ioas_map_file(                                   \
			  self->fd, self->ioas_id, mfd, start, length, iova_p, \
			  IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

#define test_err_ioctl_ioas_map_file(_errno, mfd, start, length, iova_p)     \
	EXPECT_ERRNO(                                                        \
		_errno,                                                      \
		_test_ioctl_ioas_map_file(                                   \
			self->fd, self->ioas_id, mfd, start, length, iova_p, \
			IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

#define test_ioctl_ioas_map_id_file(ioas_id, mfd, start, length, iova_p)     \
	ASSERT_EQ(0,                                                         \
		  _test_ioctl_ioas_map_file(                                 \
			  self->fd, ioas_id, mfd, start, length, iova_p,     \
			  IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))
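
/*
 * Usage sketch (illustrative only): pairs with memfd_mmap() above -- map a
 * range of the memfd into the IOAS by file descriptor rather than by VA:
 *
 *	__u64 iova;
 *	int mfd;
 *	void *buf = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, &mfd);
 *
 *	test_ioctl_ioas_map_file(mfd, 0, BUFFER_SIZE, &iova);
 */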
static int _test_ioctl_set_temp_memory_limit(int fd, unsigned int limit)
{
	struct iommu_test_cmd memlimit_cmd = {
		.size = sizeof(memlimit_cmd),
		.op = IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
		.memory_limit = { .limit = limit },
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT),
		     &memlimit_cmd);
}

#define test_ioctl_set_temp_memory_limit(limit) \
	ASSERT_EQ(0, _test_ioctl_set_temp_memory_limit(self->fd, limit))

#define test_ioctl_set_default_memory_limit() \
	test_ioctl_set_temp_memory_limit(65536)
static void teardown_iommufd(int fd, struct __test_metadata *_metadata)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_REFS,
		.check_refs = { .length = BUFFER_SIZE,
				.uptr = (uintptr_t)buffer },
	};

	if (fd == -1)
		return;

	EXPECT_EQ(0, close(fd));

	fd = open("/dev/iommu", O_RDWR);
	EXPECT_NE(-1, fd);
	EXPECT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS),
			   &test_cmd));
	EXPECT_EQ(0, close(fd));
}
#define EXPECT_ERRNO(expected_errno, cmd)         \
	({                                        \
		ASSERT_EQ(-1, cmd);               \
		EXPECT_EQ(expected_errno, errno); \
	})
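
/*
 * Usage sketch (illustrative only): EXPECT_ERRNO() wraps any helper that
 * fails with -1/errno, e.g. destroying an object that is still in use:
 *
 *	EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, self->ioas_id));
 */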
/* @data can be NULL */
static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data,
				 size_t data_len, uint32_t *capabilities)
{
	struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data;
	struct iommu_hw_info cmd = {
		.size = sizeof(cmd),
		.dev_id = device_id,
		.data_len = data_len,
		.data_uptr = (uint64_t)data,
		.out_capabilities = 0,
	};
	int ret;

	ret = ioctl(fd, IOMMU_GET_HW_INFO, &cmd);
	if (ret)
		return ret;

	assert(cmd.out_data_type == IOMMU_HW_INFO_TYPE_SELFTEST);

	/*
	 * The struct iommu_test_hw_info should be the one defined
	 * by the current kernel.
	 */
	assert(cmd.data_len == sizeof(struct iommu_test_hw_info));

	/*
	 * Trailing bytes should be 0 if the user buffer is larger than
	 * the data that the kernel reports.
	 */
	if (data_len > cmd.data_len) {
		char *ptr = (char *)(data + cmd.data_len);
		int idx = 0;

		while (idx < data_len - cmd.data_len) {
			assert(!*(ptr + idx));
			idx++;
		}
	}

	if (data_len >= offsetofend(struct iommu_test_hw_info, test_reg))
		assert(info->test_reg == IOMMU_HW_INFO_SELFTEST_REGVAL);
	if (data_len >= offsetofend(struct iommu_test_hw_info, flags))
		assert(!info->flags);

	if (capabilities)
		*capabilities = cmd.out_capabilities;

	return 0;
}

#define test_cmd_get_hw_info(device_id, data, data_len)               \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data, \
					   data_len, NULL))

#define test_err_get_hw_info(_errno, device_id, data, data_len)               \
	EXPECT_ERRNO(_errno, _test_cmd_get_hw_info(self->fd, device_id, data, \
						   data_len, NULL))

#define test_cmd_get_hw_capabilities(device_id, caps, mask) \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, 0, &caps))
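
/*
 * Usage sketch (illustrative only): probe with a buffer at least as large as
 * the kernel's report to exercise the trailing-zero check, or pass NULL data
 * to fetch only the capability flags:
 *
 *	struct iommu_test_hw_info info;
 *	uint32_t caps;
 *
 *	test_cmd_get_hw_info(idev_id, &info, sizeof(info));
 *	test_cmd_get_hw_capabilities(idev_id, caps, IOMMU_HW_CAP_DIRTY_TRACKING);
 */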
static int _test_ioctl_fault_alloc(int fd, __u32 *fault_id, __u32 *fault_fd)
{
	struct iommu_fault_alloc cmd = {
		.size = sizeof(cmd),
	};
	int ret;

	ret = ioctl(fd, IOMMU_FAULT_QUEUE_ALLOC, &cmd);
	if (ret)
		return ret;
	*fault_id = cmd.out_fault_id;
	*fault_fd = cmd.out_fault_fd;
	return 0;
}

#define test_ioctl_fault_alloc(fault_id, fault_fd)                       \
	({                                                               \
		ASSERT_EQ(0, _test_ioctl_fault_alloc(self->fd, fault_id, \
						     fault_fd));         \
		ASSERT_NE(0, *(fault_id));                               \
		ASSERT_NE(0, *(fault_fd));                               \
	})
static int _test_cmd_trigger_iopf(int fd, __u32 device_id, __u32 fault_fd)
{
	struct iommu_test_cmd trigger_iopf_cmd = {
		.size = sizeof(trigger_iopf_cmd),
		.op = IOMMU_TEST_OP_TRIGGER_IOPF,
		.trigger_iopf = {
			.dev_id = device_id,
			.pasid = 0x1,
			.grpid = 0x2,
			.perm = IOMMU_PGFAULT_PERM_READ | IOMMU_PGFAULT_PERM_WRITE,
			.addr = 0xdeadbeaf,
		},
	};
	struct iommu_hwpt_page_response response = {
		.code = IOMMUFD_PAGE_RESP_SUCCESS,
	};
	struct iommu_hwpt_pgfault fault = {};
	ssize_t bytes;
	int ret;

	ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_IOPF), &trigger_iopf_cmd);
	if (ret)
		return ret;

	bytes = read(fault_fd, &fault, sizeof(fault));
	if (bytes <= 0)
		return -EIO;

	response.cookie = fault.cookie;

	bytes = write(fault_fd, &response, sizeof(response));
	if (bytes <= 0)
		return -EIO;

	return 0;
}

#define test_cmd_trigger_iopf(device_id, fault_fd) \
	ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, fault_fd))
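
/*
 * Usage sketch (illustrative only): an IOPF-capable nested HWPT is allocated
 * against a fault queue, then a fault is triggered and auto-responded by the
 * helper above:
 *
 *	__u32 fault_id, fault_fd, iopf_hwpt_id;
 *	struct iommu_hwpt_selftest data = { .iotlb = IOMMU_TEST_IOTLB_DEFAULT };
 *
 *	test_ioctl_fault_alloc(&fault_id, &fault_fd);
 *	test_cmd_hwpt_alloc_iopf(idev_id, parent_hwpt_id, fault_id,
 *				 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
 *				 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
 *	test_cmd_trigger_iopf(idev_id, fault_fd);
 */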
static int _test_cmd_viommu_alloc(int fd, __u32 device_id, __u32 hwpt_id,
				  __u32 type, __u32 flags, __u32 *viommu_id)
{
	struct iommu_viommu_alloc cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.type = type,
		.dev_id = device_id,
		.hwpt_id = hwpt_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_VIOMMU_ALLOC, &cmd);
	if (ret)
		return ret;
	if (viommu_id)
		*viommu_id = cmd.out_viommu_id;
	return 0;
}

#define test_cmd_viommu_alloc(device_id, hwpt_id, type, viommu_id)        \
	ASSERT_EQ(0, _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id, \
					    type, 0, viommu_id))
#define test_err_viommu_alloc(_errno, device_id, hwpt_id, type, viommu_id) \
	EXPECT_ERRNO(_errno,                                               \
		     _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id,  \
					    type, 0, viommu_id))
static int _test_cmd_vdevice_alloc(int fd, __u32 viommu_id, __u32 idev_id,
				   __u64 virt_id, __u32 *vdev_id)
{
	struct iommu_vdevice_alloc cmd = {
		.size = sizeof(cmd),
		.dev_id = idev_id,
		.viommu_id = viommu_id,
		.virt_id = virt_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_VDEVICE_ALLOC, &cmd);
	if (ret)
		return ret;
	if (vdev_id)
		*vdev_id = cmd.out_vdevice_id;
	return 0;
}

#define test_cmd_vdevice_alloc(viommu_id, idev_id, virt_id, vdev_id)       \
	ASSERT_EQ(0, _test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, \
					     virt_id, vdev_id))
#define test_err_vdevice_alloc(_errno, viommu_id, idev_id, virt_id, vdev_id) \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id,   \
					     virt_id, vdev_id))
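
/*
 * Usage sketch (illustrative only): a vIOMMU is allocated against a nesting
 * parent HWPT, then a virtual device binds the physical idev to a guest-side
 * virtual device ID:
 *
 *	__u32 viommu_id, vdev_id;
 *
 *	test_cmd_viommu_alloc(idev_id, parent_hwpt_id,
 *			      IOMMU_VIOMMU_TYPE_SELFTEST, &viommu_id);
 *	test_cmd_vdevice_alloc(viommu_id, idev_id, 0x99, &vdev_id);
 */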