tools/testing/selftests/iommu/iommufd_utils.h

/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
#ifndef __SELFTEST_IOMMUFD_UTILS
#define __SELFTEST_IOMMUFD_UTILS

#include <unistd.h>
#include <stddef.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <assert.h>

#include "../kselftest_harness.h"
#include "../../../../drivers/iommu/iommufd/iommufd_test.h"

/* Hack to make assertions more readable */
#define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD

/* Imported from include/asm-generic/bitops/generic-non-atomic.h */
#define BITS_PER_BYTE 8
#define BITS_PER_LONG __BITS_PER_LONG
#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG)

enum {
	IOPT_PAGES_ACCOUNT_NONE = 0,
	IOPT_PAGES_ACCOUNT_USER = 1,
	IOPT_PAGES_ACCOUNT_MM = 2,
};

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static inline void set_bit(unsigned int nr, unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p |= mask;
}

static inline bool test_bit(unsigned int nr, unsigned long *addr)
{
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)));
}

static void *buffer;
static unsigned long BUFFER_SIZE;

static void *mfd_buffer;
static int mfd;

static unsigned long PAGE_SIZE;

#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))

static inline void *memfd_mmap(size_t length, int prot, int flags, int *mfd_p)
{
	int mfd_flags = (flags & MAP_HUGETLB) ? MFD_HUGETLB : 0;
	int mfd = memfd_create("buffer", mfd_flags);

	if (mfd <= 0)
		return MAP_FAILED;
	if (ftruncate(mfd, length))
		return MAP_FAILED;
	*mfd_p = mfd;
	return mmap(0, length, prot, flags, mfd, 0);
}
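
/*
 * Editor's usage sketch (not part of the original header): mapping a
 * shared memfd-backed buffer. memfd_mmap() returns MAP_FAILED on any
 * failure, so a single error check suffices:
 *
 *	int mfd;
 *	void *buf = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, &mfd);
 *
 *	if (buf == MAP_FAILED)
 *		return -1;
 */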

/*
 * Have the kernel check the refcount on pages. I don't know why a freshly
 * mmap'd anon non-compound page starts out with a ref of 3
 */
#define check_refs(_ptr, _length, _refs)                                      \
	({                                                                    \
		struct iommu_test_cmd test_cmd = {                            \
			.size = sizeof(test_cmd),                             \
			.op = IOMMU_TEST_OP_MD_CHECK_REFS,                    \
			.check_refs = { .length = _length,                    \
					.uptr = (uintptr_t)(_ptr),            \
					.refs = _refs },                      \
		};                                                            \
		ASSERT_EQ(0,                                                  \
			  ioctl(self->fd,                                     \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS), \
				&test_cmd));                                  \
	})

static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id,
				 __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
		.id = ioas_id,
		.mock_domain = {},
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (stdev_id)
		*stdev_id = cmd.mock_domain.out_stdev_id;
	assert(cmd.id != 0);
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain.out_hwpt_id;
	if (idev_id)
		*idev_id = cmd.mock_domain.out_idev_id;
	return 0;
}
#define test_cmd_mock_domain(ioas_id, stdev_id, hwpt_id, idev_id)       \
	ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, stdev_id, \
					   hwpt_id, idev_id))
#define test_err_mock_domain(_errno, ioas_id, stdev_id, hwpt_id)      \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \
						   stdev_id, hwpt_id, NULL))
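
/*
 * Editor's usage sketch (assumes the kselftest-harness fixture convention
 * used throughout this header: an iommufd in self->fd and an IOAS in
 * self->ioas_id):
 *
 *	__u32 stdev_id, hwpt_id, idev_id;
 *
 *	test_cmd_mock_domain(self->ioas_id, &stdev_id, &hwpt_id, &idev_id);
 *	...
 *	test_ioctl_destroy(stdev_id);
 */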

static int _test_cmd_mock_domain_flags(int fd, unsigned int ioas_id,
				       __u32 stdev_flags, __u32 *stdev_id,
				       __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS,
		.id = ioas_id,
		.mock_domain_flags = { .dev_flags = stdev_flags },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (stdev_id)
		*stdev_id = cmd.mock_domain_flags.out_stdev_id;
	assert(cmd.id != 0);
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain_flags.out_hwpt_id;
	if (idev_id)
		*idev_id = cmd.mock_domain_flags.out_idev_id;
	return 0;
}
#define test_cmd_mock_domain_flags(ioas_id, flags, stdev_id, hwpt_id, idev_id) \
	ASSERT_EQ(0, _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,     \
						 stdev_id, hwpt_id, idev_id))
#define test_err_mock_domain_flags(_errno, ioas_id, flags, stdev_id, hwpt_id) \
	EXPECT_ERRNO(_errno,                                                  \
		     _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,    \
						 stdev_id, hwpt_id, NULL))

static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id,
					 __u32 *hwpt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE,
		.id = stdev_id,
		.mock_domain_replace = {
			.pt_id = pt_id,
		},
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain_replace.pt_id;
	return 0;
}

#define test_cmd_mock_domain_replace(stdev_id, pt_id)                         \
	ASSERT_EQ(0, _test_cmd_mock_domain_replace(self->fd, stdev_id, pt_id, \
						   NULL))
#define test_err_mock_domain_replace(_errno, stdev_id, pt_id)                  \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \
							   pt_id, NULL))

static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, __u32 ft_id,
				__u32 flags, __u32 *hwpt_id, __u32 data_type,
				void *data, size_t data_len)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.dev_id = device_id,
		.pt_id = pt_id,
		.data_type = data_type,
		.data_len = data_len,
		.data_uptr = (uint64_t)data,
		.fault_id = ft_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_ALLOC, &cmd);
	if (ret)
		return ret;
	if (hwpt_id)
		*hwpt_id = cmd.out_hwpt_id;
	return 0;
}

#define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id)                   \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, IOMMU_HWPT_DATA_NONE, NULL,  \
					  0))
#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id)      \
	EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc(                         \
				     self->fd, device_id, pt_id, 0, flags, \
				     hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, 0))

#define test_cmd_hwpt_alloc_nested(device_id, pt_id, flags, hwpt_id,            \
				   data_type, data, data_len)                   \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, data_type, data, data_len))
#define test_err_hwpt_alloc_nested(_errno, device_id, pt_id, flags, hwpt_id,      \
				   data_type, data, data_len)                     \
	EXPECT_ERRNO(_errno,                                                      \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags,   \
					  hwpt_id, data_type, data, data_len))

#define test_cmd_hwpt_alloc_iopf(device_id, pt_id, fault_id, flags, hwpt_id,     \
				 data_type, data, data_len)                      \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id,  \
					  flags, hwpt_id, data_type, data,       \
					  data_len))
#define test_err_hwpt_alloc_iopf(_errno, device_id, pt_id, fault_id, flags,      \
				 hwpt_id, data_type, data, data_len)             \
	EXPECT_ERRNO(_errno,                                                     \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id,  \
					  flags, hwpt_id, data_type, data,       \
					  data_len))
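
/*
 * Editor's sketch: allocating a nested hwpt with the selftest data type.
 * Assumes the parent hwpt was allocated with IOMMU_HWPT_ALLOC_NEST_PARENT;
 * the iotlb seed value is illustrative:
 *
 *	struct iommu_hwpt_selftest data = {
 *		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
 *	};
 *	__u32 nested_hwpt_id;
 *
 *	test_cmd_hwpt_alloc_nested(idev_id, parent_hwpt_id, 0,
 *				   &nested_hwpt_id, IOMMU_HWPT_DATA_SELFTEST,
 *				   &data, sizeof(data));
 */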

#define test_cmd_hwpt_check_iotlb(hwpt_id, iotlb_id, expected)                 \
	({                                                                     \
		struct iommu_test_cmd test_cmd = {                             \
			.size = sizeof(test_cmd),                              \
			.op = IOMMU_TEST_OP_MD_CHECK_IOTLB,                    \
			.id = hwpt_id,                                         \
			.check_iotlb = {                                       \
				.id = iotlb_id,                                \
				.iotlb = expected,                             \
			},                                                     \
		};                                                             \
		ASSERT_EQ(0,                                                   \
			  ioctl(self->fd,                                      \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_IOTLB), \
				&test_cmd));                                   \
	})

#define test_cmd_hwpt_check_iotlb_all(hwpt_id, expected)           \
	({                                                         \
		int i;                                             \
		for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++) \
			test_cmd_hwpt_check_iotlb(hwpt_id, i, expected); \
	})

#define test_cmd_dev_check_cache(device_id, cache_id, expected)            \
	({                                                                 \
		struct iommu_test_cmd test_cmd = {                         \
			.size = sizeof(test_cmd),                          \
			.op = IOMMU_TEST_OP_DEV_CHECK_CACHE,               \
			.id = device_id,                                   \
			.check_dev_cache = {                               \
				.id = cache_id,                            \
				.cache = expected,                         \
			},                                                 \
		};                                                         \
		ASSERT_EQ(0, ioctl(self->fd,                               \
				   _IOMMU_TEST_CMD(                        \
					   IOMMU_TEST_OP_DEV_CHECK_CACHE), \
				   &test_cmd));                            \
	})

#define test_cmd_dev_check_cache_all(device_id, expected)             \
	({                                                            \
		int c;                                                \
		for (c = 0; c < MOCK_DEV_CACHE_NUM; c++)              \
			test_cmd_dev_check_cache(device_id, c, expected); \
	})

static int _test_cmd_hwpt_invalidate(int fd, __u32 hwpt_id, void *reqs,
				     uint32_t data_type, uint32_t lreq,
				     uint32_t *nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.data_type = data_type,
		.data_uptr = (uint64_t)reqs,
		.entry_len = lreq,
		.entry_num = *nreqs,
	};
	int rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
	*nreqs = cmd.entry_num;
	return rc;
}

#define test_cmd_hwpt_invalidate(hwpt_id, reqs, data_type, lreq, nreqs)       \
	({                                                                    \
		ASSERT_EQ(0,                                                  \
			  _test_cmd_hwpt_invalidate(self->fd, hwpt_id, reqs,  \
						    data_type, lreq, nreqs)); \
	})
#define test_err_hwpt_invalidate(_errno, hwpt_id, reqs, data_type, lreq, \
				 nreqs)                                  \
	({                                                               \
		EXPECT_ERRNO(_errno, _test_cmd_hwpt_invalidate(          \
					     self->fd, hwpt_id, reqs,    \
					     data_type, lreq, nreqs));   \
	})

static int _test_cmd_viommu_invalidate(int fd, __u32 viommu_id, void *reqs,
				       uint32_t data_type, uint32_t lreq,
				       uint32_t *nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = viommu_id,
		.data_type = data_type,
		.data_uptr = (uint64_t)reqs,
		.entry_len = lreq,
		.entry_num = *nreqs,
	};
	int rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
	*nreqs = cmd.entry_num;
	return rc;
}

#define test_cmd_viommu_invalidate(viommu, reqs, lreq, nreqs)                   \
	({                                                                      \
		ASSERT_EQ(0,                                                    \
			  _test_cmd_viommu_invalidate(self->fd, viommu, reqs,   \
					IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,  \
						      lreq, nreqs));            \
	})
#define test_err_viommu_invalidate(_errno, viommu_id, reqs, data_type, lreq, \
				   nreqs)                                    \
	({                                                                   \
		EXPECT_ERRNO(_errno, _test_cmd_viommu_invalidate(            \
					     self->fd, viommu_id, reqs,      \
					     data_type, lreq, nreqs));       \
	})

static int _test_cmd_access_replace_ioas(int fd, __u32 access_id,
					 unsigned int ioas_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_ACCESS_REPLACE_IOAS,
		.id = access_id,
		.access_replace_ioas = { .ioas_id = ioas_id },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	return 0;
}
#define test_cmd_access_replace_ioas(access_id, ioas_id) \
	ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id))

static int _test_cmd_set_dirty_tracking(int fd, __u32 hwpt_id, bool enabled)
{
	struct iommu_hwpt_set_dirty_tracking cmd = {
		.size = sizeof(cmd),
		.flags = enabled ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0,
		.hwpt_id = hwpt_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd);
	if (ret)
		return -errno;
	return 0;
}
#define test_cmd_set_dirty_tracking(hwpt_id, enabled) \
	ASSERT_EQ(0, _test_cmd_set_dirty_tracking(self->fd, hwpt_id, enabled))

static int _test_cmd_get_dirty_bitmap(int fd, __u32 hwpt_id, size_t length,
				      __u64 iova, size_t page_size,
				      __u64 *bitmap, __u32 flags)
{
	struct iommu_hwpt_get_dirty_bitmap cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.flags = flags,
		.iova = iova,
		.length = length,
		.page_size = page_size,
		.data = (uintptr_t)bitmap,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd);
	if (ret)
		return ret;
	return 0;
}

#define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size,    \
				  bitmap, flags)                           \
	ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, \
						page_size, bitmap, flags))
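
/*
 * Editor's sketch of the dirty-tracking flow these two helpers compose
 * (the bitmap must hold at least length/page_size bits):
 *
 *	test_cmd_set_dirty_tracking(hwpt_id, true);
 *	... mock device dirties pages ...
 *	test_cmd_get_dirty_bitmap(self->fd, hwpt_id, length, iova,
 *				  page_size, bitmap, 0);
 *	test_cmd_set_dirty_tracking(hwpt_id, false);
 */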

static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length,
					   __u64 iova, size_t page_size,
					   __u64 *bitmap, __u64 *dirty)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DIRTY,
		.id = hwpt_id,
		.dirty = {
			.iova = iova,
			.length = length,
			.page_size = page_size,
			.uptr = (uintptr_t)bitmap,
		},
	};
	int ret;

	ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY), &cmd);
	if (ret)
		return -ret;
	if (dirty)
		*dirty = cmd.dirty.out_nr_dirty;
	return 0;
}

#define test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, \
				       bitmap, nr)                           \
	ASSERT_EQ(0,                                                         \
		  _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \
						  page_size, bitmap, nr))

static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
				    __u64 iova, size_t page_size,
				    size_t pte_page_size, __u64 *bitmap,
				    __u64 nbits, __u32 flags,
				    struct __test_metadata *_metadata)
{
	unsigned long npte = pte_page_size / page_size, pteset = 2 * npte;
	unsigned long j, i, nr = nbits / pteset ?: 1;
	unsigned long bitmap_size = DIV_ROUND_UP(nbits, BITS_PER_BYTE);
	__u64 out_dirty = 0;

	/* Mark all even bits as dirty in the mock domain */
	memset(bitmap, 0, bitmap_size);
	for (i = 0; i < nbits; i += pteset)
		set_bit(i, (unsigned long *)bitmap);

	test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size,
				       bitmap, &out_dirty);
	ASSERT_EQ(nr, out_dirty);

	/* Expect all even bits as dirty in the user bitmap */
	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);
	/* Beware ASSERT_EQ() is two statements -- braces are not redundant! */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(j < npte,
				  test_bit(i + j, (unsigned long *)bitmap));
		}
		ASSERT_EQ(!(i % pteset), test_bit(i, (unsigned long *)bitmap));
	}

	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);

	/* It was read already -- expect all zeroes */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(
				(j < npte) &&
					(flags &
					 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR),
				test_bit(i + j, (unsigned long *)bitmap));
		}
	}

	return 0;
}
#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, pte_size,    \
				bitmap, bitmap_size, flags, _metadata)         \
	ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \
					      page_size, pte_size, bitmap,     \
					      bitmap_size, flags, _metadata))
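
/*
 * Editor's note: a typical caller (sizes illustrative) hands in a bitmap
 * sized for length/page_size bits; with
 * IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR set, the second read-back above is
 * expected to still see the dirty bits:
 *
 *	test_mock_dirty_bitmaps(hwpt_id, BUFFER_SIZE, iova, PAGE_SIZE,
 *				PAGE_SIZE, bitmap, nbits,
 *				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
 *				_metadata);
 */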

static int _test_cmd_create_access(int fd, unsigned int ioas_id,
				   __u32 *access_id, unsigned int flags)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_CREATE_ACCESS,
		.id = ioas_id,
		.create_access = { .flags = flags },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	*access_id = cmd.create_access.out_access_fd;
	return 0;
}
#define test_cmd_create_access(ioas_id, access_id, flags)                  \
	ASSERT_EQ(0, _test_cmd_create_access(self->fd, ioas_id, access_id, \
					     flags))

static int _test_cmd_destroy_access(unsigned int access_id)
{
	return close(access_id);
}
#define test_cmd_destroy_access(access_id) \
	ASSERT_EQ(0, _test_cmd_destroy_access(access_id))

static int _test_cmd_destroy_access_pages(int fd, unsigned int access_id,
					  unsigned int access_pages_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DESTROY_ACCESS_PAGES,
		.id = access_id,
		.destroy_access_pages = { .access_pages_id = access_pages_id },
	};
	return ioctl(fd, IOMMU_TEST_CMD, &cmd);
}
#define test_cmd_destroy_access_pages(access_id, access_pages_id)        \
	ASSERT_EQ(0, _test_cmd_destroy_access_pages(self->fd, access_id, \
						    access_pages_id))
#define test_err_destroy_access_pages(_errno, access_id, access_pages_id) \
	EXPECT_ERRNO(_errno, _test_cmd_destroy_access_pages(              \
				     self->fd, access_id, access_pages_id))

static int _test_ioctl_destroy(int fd, unsigned int id)
{
	struct iommu_destroy cmd = {
		.size = sizeof(cmd),
		.id = id,
	};
	return ioctl(fd, IOMMU_DESTROY, &cmd);
}
#define test_ioctl_destroy(id) ASSERT_EQ(0, _test_ioctl_destroy(self->fd, id))

static int _test_ioctl_ioas_alloc(int fd, __u32 *id)
{
	struct iommu_ioas_alloc cmd = {
		.size = sizeof(cmd),
	};
	int ret;

	ret = ioctl(fd, IOMMU_IOAS_ALLOC, &cmd);
	if (ret)
		return ret;
	*id = cmd.out_ioas_id;
	return 0;
}
#define test_ioctl_ioas_alloc(id)                                   \
	({                                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_alloc(self->fd, id)); \
		ASSERT_NE(0, *(id));                                \
	})

static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer,
				size_t length, __u64 *iova, unsigned int flags)
{
	struct iommu_ioas_map cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.user_va = (uintptr_t)buffer,
		.length = length,
	};
	int ret;

	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
		cmd.iova = *iova;

	ret = ioctl(fd, IOMMU_IOAS_MAP, &cmd);
	*iova = cmd.iova;
	return ret;
}
#define test_ioctl_ioas_map(buffer, length, iova_p)                        \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

#define test_err_ioctl_ioas_map(_errno, buffer, length, iova_p)            \
	EXPECT_ERRNO(_errno,                                               \
		     _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

#define test_ioctl_ioas_map_id(ioas_id, buffer, length, iova_p)               \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, ioas_id, buffer, length,  \
					  iova_p,                             \
					  IOMMU_IOAS_MAP_WRITEABLE |          \
						  IOMMU_IOAS_MAP_READABLE))

#define test_ioctl_ioas_map_fixed(buffer, length, iova)                       \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})

#define test_ioctl_ioas_map_fixed_id(ioas_id, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0,                                                  \
			  _test_ioctl_ioas_map(                               \
				  self->fd, ioas_id, buffer, length, &__iova, \
				  IOMMU_IOAS_MAP_FIXED_IOVA |                 \
					  IOMMU_IOAS_MAP_WRITEABLE |          \
					  IOMMU_IOAS_MAP_READABLE));          \
	})

#define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		EXPECT_ERRNO(_errno,                                          \
			     _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})

static int _test_ioctl_ioas_unmap(int fd, unsigned int ioas_id, uint64_t iova,
				  size_t length, uint64_t *out_len)
{
	struct iommu_ioas_unmap cmd = {
		.size = sizeof(cmd),
		.ioas_id = ioas_id,
		.iova = iova,
		.length = length,
	};
	int ret;

	ret = ioctl(fd, IOMMU_IOAS_UNMAP, &cmd);
	if (out_len)
		*out_len = cmd.length;
	return ret;
}
#define test_ioctl_ioas_unmap(iova, length)                                \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, iova, \
					    length, NULL))

#define test_ioctl_ioas_unmap_id(ioas_id, iova, length)                      \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, length, \
					    NULL))

#define test_err_ioctl_ioas_unmap(_errno, iova, length)                      \
	EXPECT_ERRNO(_errno, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, \
						    iova, length, NULL))
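
/*
 * Editor's sketch: map at a caller-chosen IOVA and unmap it again (the
 * address is illustrative and must fall inside the IOAS aperture):
 *
 *	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, 0x10000);
 *	test_ioctl_ioas_unmap(0x10000, PAGE_SIZE);
 */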

static int _test_ioctl_ioas_map_file(int fd, unsigned int ioas_id, int mfd,
				     size_t start, size_t length, __u64 *iova,
				     unsigned int flags)
{
	struct iommu_ioas_map_file cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.fd = mfd,
		.start = start,
		.length = length,
	};
	int ret;

	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
		cmd.iova = *iova;

	ret = ioctl(fd, IOMMU_IOAS_MAP_FILE, &cmd);
	*iova = cmd.iova;
	return ret;
}

#define test_ioctl_ioas_map_file(mfd, start, length, iova_p)                    \
	ASSERT_EQ(0,                                                            \
		  _test_ioctl_ioas_map_file(                                    \
			  self->fd, self->ioas_id, mfd, start, length, iova_p,  \
			  IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

#define test_err_ioctl_ioas_map_file(_errno, mfd, start, length, iova_p)     \
	EXPECT_ERRNO(                                                        \
		_errno,                                                      \
		_test_ioctl_ioas_map_file(                                   \
			self->fd, self->ioas_id, mfd, start, length, iova_p, \
			IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

#define test_ioctl_ioas_map_id_file(ioas_id, mfd, start, length, iova_p)       \
	ASSERT_EQ(0,                                                           \
		  _test_ioctl_ioas_map_file(                                   \
			  self->fd, ioas_id, mfd, start, length, iova_p,       \
			  IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

static int _test_ioctl_set_temp_memory_limit(int fd, unsigned int limit)
{
	struct iommu_test_cmd memlimit_cmd = {
		.size = sizeof(memlimit_cmd),
		.op = IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
		.memory_limit = { .limit = limit },
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT),
		     &memlimit_cmd);
}

#define test_ioctl_set_temp_memory_limit(limit) \
	ASSERT_EQ(0, _test_ioctl_set_temp_memory_limit(self->fd, limit))

#define test_ioctl_set_default_memory_limit() \
	test_ioctl_set_temp_memory_limit(65536)

static void teardown_iommufd(int fd, struct __test_metadata *_metadata)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_REFS,
		.check_refs = { .length = BUFFER_SIZE,
				.uptr = (uintptr_t)buffer },
	};

	if (fd == -1)
		return;

	EXPECT_EQ(0, close(fd));

	fd = open("/dev/iommu", O_RDWR);
	EXPECT_NE(-1, fd);
	EXPECT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS),
			   &test_cmd));
	EXPECT_EQ(0, close(fd));
}

#define EXPECT_ERRNO(expected_errno, cmd)         \
	({                                        \
		ASSERT_EQ(-1, cmd);               \
		EXPECT_EQ(expected_errno, errno); \
	})
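
/*
 * Editor's note: EXPECT_ERRNO() first asserts that the wrapped expression
 * returned -1 and only then compares errno, so it is meant to wrap the raw
 * _test_*() helpers directly, never the ASSERT_EQ-based test_cmd_*()
 * wrappers.
 */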

#endif

/* @data can be NULL */
static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data,
				 size_t data_len, uint32_t *capabilities)
{
	struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data;
	struct iommu_hw_info cmd = {
		.size = sizeof(cmd),
		.dev_id = device_id,
		.data_len = data_len,
		.data_uptr = (uint64_t)data,
		.out_capabilities = 0,
	};
	int ret;

	ret = ioctl(fd, IOMMU_GET_HW_INFO, &cmd);
	if (ret)
		return ret;

	assert(cmd.out_data_type == IOMMU_HW_INFO_TYPE_SELFTEST);

	/*
	 * The struct iommu_test_hw_info should be the one defined
	 * by the current kernel.
	 */
	assert(cmd.data_len == sizeof(struct iommu_test_hw_info));

	/*
	 * Trailing bytes should be 0 if user buffer is larger than
	 * the data that kernel reports.
	 */
	if (data_len > cmd.data_len) {
		char *ptr = (char *)(data + cmd.data_len);
		int idx = 0;

		while (idx < data_len - cmd.data_len) {
			assert(!*(ptr + idx));
			idx++;
		}
	}

	if (info) {
		if (data_len >= offsetofend(struct iommu_test_hw_info, test_reg))
			assert(info->test_reg == IOMMU_HW_INFO_SELFTEST_REGVAL);
		if (data_len >= offsetofend(struct iommu_test_hw_info, flags))
			assert(!info->flags);
	}

	if (capabilities)
		*capabilities = cmd.out_capabilities;

	return 0;
}

#define test_cmd_get_hw_info(device_id, data, data_len)               \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data, \
					   data_len, NULL))

#define test_err_get_hw_info(_errno, device_id, data, data_len)               \
	EXPECT_ERRNO(_errno, _test_cmd_get_hw_info(self->fd, device_id, data, \
						   data_len, NULL))

#define test_cmd_get_hw_capabilities(device_id, caps, mask) \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, 0, &caps))
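
/*
 * Editor's usage sketch (buffer size choice is illustrative; a shorter or
 * NULL buffer is also valid, as the checks above show):
 *
 *	struct iommu_test_hw_info info;
 *
 *	test_cmd_get_hw_info(idev_id, &info, sizeof(info));
 */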

static int _test_ioctl_fault_alloc(int fd, __u32 *fault_id, __u32 *fault_fd)
{
	struct iommu_fault_alloc cmd = {
		.size = sizeof(cmd),
	};
	int ret;

	ret = ioctl(fd, IOMMU_FAULT_QUEUE_ALLOC, &cmd);
	if (ret)
		return ret;
	*fault_id = cmd.out_fault_id;
	*fault_fd = cmd.out_fault_fd;
	return 0;
}

#define test_ioctl_fault_alloc(fault_id, fault_fd)                       \
	({                                                               \
		ASSERT_EQ(0, _test_ioctl_fault_alloc(self->fd, fault_id, \
						     fault_fd));         \
		ASSERT_NE(0, *(fault_id));                               \
		ASSERT_NE(0, *(fault_fd));                               \
	})

static int _test_cmd_trigger_iopf(int fd, __u32 device_id, __u32 fault_fd)
{
	struct iommu_test_cmd trigger_iopf_cmd = {
		.size = sizeof(trigger_iopf_cmd),
		.op = IOMMU_TEST_OP_TRIGGER_IOPF,
		.trigger_iopf = {
			.dev_id = device_id,
			.pasid = 0x1,
			.grpid = 0x2,
			.perm = IOMMU_PGFAULT_PERM_READ | IOMMU_PGFAULT_PERM_WRITE,
			.addr = 0xdeadbeaf,
		},
	};
	struct iommu_hwpt_page_response response = {
		.code = IOMMUFD_PAGE_RESP_SUCCESS,
	};
	struct iommu_hwpt_pgfault fault = {};
	ssize_t bytes;
	int ret;

	ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_IOPF), &trigger_iopf_cmd);
	if (ret)
		return ret;

	bytes = read(fault_fd, &fault, sizeof(fault));
	if (bytes <= 0)
		return -EIO;

	response.cookie = fault.cookie;

	bytes = write(fault_fd, &response, sizeof(response));
	if (bytes <= 0)
		return -EIO;

	return 0;
}

#define test_cmd_trigger_iopf(device_id, fault_fd) \
	ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, fault_fd))
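
/*
 * Editor's sketch of the full IOPF round trip these helpers exercise; the
 * fault_id wiring through test_cmd_hwpt_alloc_iopf() is illustrative, with
 * data as in the nested-alloc sketch earlier in this header:
 *
 *	__u32 fault_id, fault_fd, iopf_hwpt_id;
 *
 *	test_ioctl_fault_alloc(&fault_id, &fault_fd);
 *	test_cmd_hwpt_alloc_iopf(idev_id, hwpt_id, fault_id,
 *				 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
 *				 IOMMU_HWPT_DATA_SELFTEST, &data,
 *				 sizeof(data));
 *	test_cmd_trigger_iopf(idev_id, fault_fd);
 */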

static int _test_cmd_viommu_alloc(int fd, __u32 device_id, __u32 hwpt_id,
				  __u32 type, __u32 flags, __u32 *viommu_id)
{
	struct iommu_viommu_alloc cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.type = type,
		.dev_id = device_id,
		.hwpt_id = hwpt_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_VIOMMU_ALLOC, &cmd);
	if (ret)
		return ret;
	if (viommu_id)
		*viommu_id = cmd.out_viommu_id;
	return 0;
}

#define test_cmd_viommu_alloc(device_id, hwpt_id, type, viommu_id)        \
	ASSERT_EQ(0, _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id, \
					    type, 0, viommu_id))
#define test_err_viommu_alloc(_errno, device_id, hwpt_id, type, viommu_id) \
	EXPECT_ERRNO(_errno,                                               \
		     _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id,  \
					    type, 0, viommu_id))

static int _test_cmd_vdevice_alloc(int fd, __u32 viommu_id, __u32 idev_id,
				   __u64 virt_id, __u32 *vdev_id)
{
	struct iommu_vdevice_alloc cmd = {
		.size = sizeof(cmd),
		.dev_id = idev_id,
		.viommu_id = viommu_id,
		.virt_id = virt_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_VDEVICE_ALLOC, &cmd);
	if (ret)
		return ret;
	if (vdev_id)
		*vdev_id = cmd.out_vdevice_id;
	return 0;
}

#define test_cmd_vdevice_alloc(viommu_id, idev_id, virt_id, vdev_id)       \
	ASSERT_EQ(0, _test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, \
					     virt_id, vdev_id))
#define test_err_vdevice_alloc(_errno, viommu_id, idev_id, virt_id, vdev_id) \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id,   \
					     virt_id, vdev_id))
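
/*
 * Editor's sketch: a vIOMMU is allocated against a nesting-parent hwpt,
 * then a vDEVICE binds a physical idev to a virtual device ID inside it
 * (the virt_id value is illustrative):
 *
 *	__u32 viommu_id, vdev_id;
 *
 *	test_cmd_viommu_alloc(idev_id, parent_hwpt_id,
 *			      IOMMU_VIOMMU_TYPE_SELFTEST, &viommu_id);
 *	test_cmd_vdevice_alloc(viommu_id, idev_id, 0x99, &vdev_id);
 */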