Merge tag 'trace-printf-v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/trace...
[drm/drm-misc.git] / tools / testing / selftests / iommu / iommufd_fail_nth.c
blob22f6fd5f0f7414d45edbc2ce36ad3a4fb83959f3
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
4 * These tests are "kernel integrity" tests. They are looking for kernel
 5  * WARN/OOPS/kasan/etc splats triggered by kernel sanitizers & debugging
6 * features. It does not attempt to verify that the system calls are doing what
7 * they are supposed to do.
9 * The basic philosophy is to run a sequence of calls that will succeed and then
10 * sweep every failure injection point on that call chain to look for
11 * interesting things in error handling.
13 * This test is best run with:
14 * echo 1 > /proc/sys/kernel/panic_on_warn
15 * If something is actually going wrong.
17 #include <fcntl.h>
18 #include <dirent.h>
20 #define __EXPORTED_HEADERS__
21 #include <linux/vfio.h>
23 #include "iommufd_utils.h"
25 static bool have_fault_injection;
27 static int writeat(int dfd, const char *fn, const char *val)
29 size_t val_len = strlen(val);
30 ssize_t res;
31 int fd;
33 fd = openat(dfd, fn, O_WRONLY);
34 if (fd == -1)
35 return -1;
36 res = write(fd, val, val_len);
37 assert(res == val_len);
38 close(fd);
39 return 0;
42 static __attribute__((constructor)) void setup_buffer(void)
44 PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
46 BUFFER_SIZE = 2*1024*1024;
48 buffer = mmap(0, BUFFER_SIZE, PROT_READ | PROT_WRITE,
49 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
51 mfd_buffer = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
52 &mfd);
/*
 * This sets up fail_injection in a way that is useful for this test.
 * It does not attempt to restore things back to how they were.
 */
static __attribute__((constructor)) void setup_fault_injection(void)
{
	DIR *debugfs = opendir("/sys/kernel/debug/");
	struct dirent *dent;

	/* No debugfs: leave have_fault_injection false, tests will SKIP */
	if (!debugfs)
		return;

	/* Allow any allocation call to be fault injected */
	if (writeat(dirfd(debugfs), "failslab/ignore-gfp-wait", "N"))
		return;
	writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-wait", "N");
	writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-highmem", "N");

	/* Walk every fail* injection point under debugfs */
	while ((dent = readdir(debugfs))) {
		char fn[300];

		if (strncmp(dent->d_name, "fail", 4) != 0)
			continue;

		/* We are looking for kernel splats, quiet down the log */
		snprintf(fn, sizeof(fn), "%s/verbose", dent->d_name);
		writeat(dirfd(debugfs), fn, "0");
	}
	closedir(debugfs);
	have_fault_injection = true;
}
/*
 * Per-test sweep state. proc_fd is the open /proc/self/task/<pid>/fail-nth
 * file; iteration is the current injection point being failed (0 means
 * injection has not been armed yet for this run).
 */
struct fail_nth_state {
	int proc_fd;
	unsigned int iteration;
};
92 static void fail_nth_first(struct __test_metadata *_metadata,
93 struct fail_nth_state *nth_state)
95 char buf[300];
97 snprintf(buf, sizeof(buf), "/proc/self/task/%u/fail-nth", getpid());
98 nth_state->proc_fd = open(buf, O_RDWR);
99 ASSERT_NE(-1, nth_state->proc_fd);
/*
 * Decide whether the sweep should run another iteration. Reads back the
 * fail-nth counter: "0" means the previous iteration's injected fault was
 * consumed, so the next point should be tried; any other value means the
 * call chain ended before reaching the Nth site and the sweep is done.
 * Returns true to run another iteration, false to stop.
 */
static bool fail_nth_next(struct __test_metadata *_metadata,
			  struct fail_nth_state *nth_state,
			  int test_result)
{
	static const char disable_nth[] = "0";
	char buf[300];

	/*
	 * This is just an arbitrary limit based on the current kernel
	 * situation. Changes in the kernel can dramatically change the number of
	 * required fault injection sites, so if this hits it doesn't
	 * necessarily mean a test failure, just that the limit has to be made
	 * bigger.
	 */
	ASSERT_GT(400, nth_state->iteration);
	if (nth_state->iteration != 0) {
		ssize_t res;
		ssize_t res2;

		buf[0] = 0;
		/*
		 * Annoyingly disabling the nth can also fail. This means
		 * the test passed without triggering failure
		 */
		res = pread(nth_state->proc_fd, buf, sizeof(buf), 0);
		if (res == -1 && errno == EFAULT) {
			/*
			 * The injected fault hit the pread itself; treat it
			 * as "counter still armed" so the sweep continues.
			 */
			buf[0] = '1';
			buf[1] = '\n';
			res = 2;
		}

		res2 = pwrite(nth_state->proc_fd, disable_nth,
			      ARRAY_SIZE(disable_nth) - 1, 0);
		if (res2 == -1 && errno == EFAULT) {
			/* Retry once; the second write is not fault injected */
			res2 = pwrite(nth_state->proc_fd, disable_nth,
				      ARRAY_SIZE(disable_nth) - 1, 0);
			buf[0] = '1';
			buf[1] = '\n';
		}
		ASSERT_EQ(ARRAY_SIZE(disable_nth) - 1, res2);

		/* printf(" nth %u result=%d nth=%u\n", nth_state->iteration,
		   test_result, atoi(buf)); */
		fflush(stdout);
		ASSERT_LT(1, res);
		if (res != 2 || buf[0] != '0' || buf[1] != '\n')
			return false;
	} else {
		/* printf(" nth %u result=%d\n", nth_state->iteration,
		   test_result); */
	}
	nth_state->iteration++;
	return true;
}
/*
 * This is called during the test to start failure injection. It allows the test
 * to do some setup that has already been swept and thus reduce the required
 * iterations.
 */
void __fail_nth_enable(struct __test_metadata *_metadata,
		       struct fail_nth_state *nth_state)
{
	char buf[300];
	size_t len;

	/* Iteration 0 is the baseline pass: run with no injection armed */
	if (!nth_state->iteration)
		return;

	/* Arm fail-nth: the iteration'th injection site from here will fail */
	len = snprintf(buf, sizeof(buf), "%u", nth_state->iteration);
	ASSERT_EQ(len, pwrite(nth_state->proc_fd, buf, len, 0));
}
#define fail_nth_enable() __fail_nth_enable(_metadata, _nth_state)

/*
 * Defines a fail-nth sweep test: the body (test_nth_##name) is run once as
 * a baseline, then repeatedly with fault injection armed at successive
 * sites, tearing down and re-running the fixture between iterations.
 * The body returns 0 on success, -1 when an (expected) injected failure
 * was hit.
 */
#define TEST_FAIL_NTH(fixture_name, name)                                     \
	static int test_nth_##name(struct __test_metadata *_metadata,         \
				   FIXTURE_DATA(fixture_name) *self,          \
				   const FIXTURE_VARIANT(fixture_name)        \
					   *variant,                          \
				   struct fail_nth_state *_nth_state);        \
	TEST_F(fixture_name, name)                                            \
	{                                                                     \
		struct fail_nth_state nth_state = {};                         \
		int test_result = 0;                                          \
                                                                              \
		if (!have_fault_injection)                                    \
			SKIP(return,                                          \
			     "fault injection is not enabled in the kernel"); \
		fail_nth_first(_metadata, &nth_state);                        \
		ASSERT_EQ(0, test_nth_##name(_metadata, self, variant,        \
					     &nth_state));                    \
		while (fail_nth_next(_metadata, &nth_state, test_result)) {   \
			fixture_name##_teardown(_metadata, self, variant);    \
			fixture_name##_setup(_metadata, self, variant);       \
			test_result = test_nth_##name(_metadata, self,        \
						      variant, &nth_state);   \
		};                                                            \
		ASSERT_EQ(0, test_result);                                    \
	}                                                                     \
	static int test_nth_##name(                                           \
		struct __test_metadata __attribute__((unused)) *_metadata,    \
		FIXTURE_DATA(fixture_name) __attribute__((unused)) *self,     \
		const FIXTURE_VARIANT(fixture_name) __attribute__((unused))   \
			*variant,                                             \
		struct fail_nth_state *_nth_state)
/* Shared fixture: the iommufd fd and an optional test access object id */
FIXTURE(basic_fail_nth)
{
	int fd;
	uint32_t access_id;
};
/* Reset to "nothing open" so teardown is safe even after early failure */
FIXTURE_SETUP(basic_fail_nth)
{
	self->fd = -1;
	self->access_id = 0;
}
/* Destroy any leftover access object, then close the iommufd */
FIXTURE_TEARDOWN(basic_fail_nth)
{
	int rc;

	if (self->access_id) {
		/* The access FD holds the iommufd open until it closes */
		rc = _test_cmd_destroy_access(self->access_id);
		assert(rc == 0);
	}
	teardown_iommufd(self->fd, _metadata);
}
/* Cover ioas.c */
TEST_FAIL_NTH(basic_fail_nth, basic)
{
	struct iommu_iova_range ranges[10];
	uint32_t ioas_id;
	__u64 iova;

	/* Sweep the entire sequence, starting from open() */
	fail_nth_enable();

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	{
		struct iommu_ioas_iova_ranges ranges_cmd = {
			.size = sizeof(ranges_cmd),
			.num_iovas = ARRAY_SIZE(ranges),
			.ioas_id = ioas_id,
			.allowed_iovas = (uintptr_t)ranges,
		};
		if (ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd))
			return -1;
	}

	{
		struct iommu_ioas_allow_iovas allow_cmd = {
			.size = sizeof(allow_cmd),
			.ioas_id = ioas_id,
			.num_iovas = 1,
			.allowed_iovas = (uintptr_t)ranges,
		};

		/* Restrict the IOAS to a single allowed window */
		ranges[0].start = 16*1024;
		ranges[0].last = BUFFER_SIZE + 16 * 1024 * 600 - 1;
		if (ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd))
			return -1;
	}

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	{
		/* Copy within the same IOAS to exercise the copy path */
		struct iommu_ioas_copy copy_cmd = {
			.size = sizeof(copy_cmd),
			.flags = IOMMU_IOAS_MAP_WRITEABLE |
				 IOMMU_IOAS_MAP_READABLE,
			.dst_ioas_id = ioas_id,
			.src_ioas_id = ioas_id,
			.src_iova = iova,
			.length = sizeof(ranges),
		};

		if (ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd))
			return -1;
	}

	if (_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE,
				   NULL))
		return -1;
	/* Failure path of no IOVA to unmap */
	_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE, NULL);
	return 0;
}
/* iopt_area_fill_domains() and iopt_area_fill_domain() */
TEST_FAIL_NTH(basic_fail_nth, map_domain)
{
	uint32_t ioas_id;
	__u32 stdev_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	/* Setup above was already swept; inject from the mock domain on */
	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	/* Map with a domain attached (fill_domains path) */
	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;

	/* Re-attach so the existing area is filled into a new domain */
	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;
	return 0;
}
/* iopt_area_fill_domains() and iopt_area_fill_domain() */
TEST_FAIL_NTH(basic_fail_nth, map_file_domain)
{
	uint32_t ioas_id;
	__u32 stdev_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	/* Setup above was already swept; inject from the mock domain on */
	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	/* Same as map_domain but backed by the memfd instead of anon memory */
	if (_test_ioctl_ioas_map_file(self->fd, ioas_id, mfd, 0, 262144, &iova,
				      IOMMU_IOAS_MAP_WRITEABLE |
				      IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;
	return 0;
}
/* Two attached domains: exercises multi-domain fill and unfill paths */
TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
{
	uint32_t ioas_id;
	__u32 stdev_id2;
	__u32 stdev_id;
	__u32 hwpt_id2;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	/* First domain is part of the already-swept setup */
	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2,
				  NULL))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id2))
		return -1;

	/* Re-attach both so the area is filled into two fresh domains */
	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;
	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2,
				  NULL))
		return -1;
	return 0;
}
/* Sweep the access read/write paths (fast and slow) in both directions */
TEST_FAIL_NTH(basic_fail_nth, access_rw)
{
	uint64_t tmp_big[4096];
	uint32_t ioas_id;
	uint16_t tmp[32];
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	/* Setup above was already swept; inject from access creation on */
	fail_nth_enable();

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id, 0))
		return -1;

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_RW,
			.id = self->access_id,
			.access_rw = { .iova = iova,
				       .length = sizeof(tmp),
				       .uptr = (uintptr_t)tmp },
		};

		// READ
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;

		/* WRITE */
		access_cmd.access_rw.flags = MOCK_ACCESS_RW_WRITE;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;

		/* Slow-path READ then slow-path WRITE */
		access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
		access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH |
					     MOCK_ACCESS_RW_WRITE;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
	}

	{
		/* Large slow-path transfer to hit the chunked copy path */
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_RW,
			.id = self->access_id,
			.access_rw = { .iova = iova,
				       .flags = MOCK_ACCESS_RW_SLOW_PATH,
				       .length = sizeof(tmp_big),
				       .uptr = (uintptr_t)tmp_big },
		};

		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
	}
	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;
	return 0;
}
/* pages.c access functions */
TEST_FAIL_NTH(basic_fail_nth, access_pin)
{
	uint32_t access_pages_id;
	uint32_t ioas_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
				    MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
		return -1;

	/* Setup above was already swept; inject from the pin onwards */
	fail_nth_enable();

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_PAGES,
			.id = self->access_id,
			.access_pages = { .iova = iova,
					  .length = BUFFER_SIZE,
					  .uptr = (uintptr_t)buffer },
		};

		/*
		 * NOTE(review): ioctl nr is built from IOMMU_TEST_OP_ACCESS_RW
		 * while .op is ACCESS_PAGES — presumably the driver dispatches
		 * on .op, but confirm this mismatch is intentional.
		 */
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
	}

	if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
					   access_pages_id))
		return -1;

	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;
	return 0;
}
/* iopt_pages_fill_xarray() */
TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
{
	uint32_t access_pages_id;
	uint32_t ioas_id;
	__u32 stdev_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	/* Attach a domain first so the pin fills the xarray from the domain */
	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
				    MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
		return -1;

	/* Setup above was already swept; inject from the pin onwards */
	fail_nth_enable();

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_PAGES,
			.id = self->access_id,
			.access_pages = { .iova = iova,
					  .length = BUFFER_SIZE,
					  .uptr = (uintptr_t)buffer },
		};

		/*
		 * NOTE(review): ioctl nr is built from IOMMU_TEST_OP_ACCESS_RW
		 * while .op is ACCESS_PAGES — presumably the driver dispatches
		 * on .op, but confirm this mismatch is intentional.
		 */
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
	}

	if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
					   access_pages_id))
		return -1;

	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;
	return 0;
}
/* device.c */
TEST_FAIL_NTH(basic_fail_nth, device)
{
	struct iommu_test_hw_info info;
	uint32_t ioas_id;
	uint32_t ioas_id2;
	uint32_t stdev_id;
	uint32_t idev_id;
	uint32_t hwpt_id;
	uint32_t viommu_id;
	uint32_t vdev_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id2))
		return -1;

	/* Fixed-IOVA map at the mock aperture start in both IOAS's */
	iova = MOCK_APERTURE_START;
	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, PAGE_SIZE, &iova,
				 IOMMU_IOAS_MAP_FIXED_IOVA |
					 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;
	if (_test_ioctl_ioas_map(self->fd, ioas_id2, buffer, PAGE_SIZE, &iova,
				 IOMMU_IOAS_MAP_FIXED_IOVA |
					 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	/* Setup above was already swept; inject from device binding on */
	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, NULL,
				  &idev_id))
		return -1;

	if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info), NULL))
		return -1;

	if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, 0, &hwpt_id,
				 IOMMU_HWPT_DATA_NONE, 0, 0))
		return -1;

	/* Replace the attached domain with the second IOAS, then the HWPT */
	if (_test_cmd_mock_domain_replace(self->fd, stdev_id, ioas_id2, NULL))
		return -1;

	if (_test_cmd_mock_domain_replace(self->fd, stdev_id, hwpt_id, NULL))
		return -1;

	/* Nesting parent HWPT, then a vIOMMU and vDEVICE on top of it */
	if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0,
				 IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id,
				 IOMMU_HWPT_DATA_NONE, 0, 0))
		return -1;

	if (_test_cmd_viommu_alloc(self->fd, idev_id, hwpt_id,
				   IOMMU_VIOMMU_TYPE_SELFTEST, 0, &viommu_id))
		return -1;

	if (_test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, 0, &vdev_id))
		return -1;

	return 0;
}
684 TEST_HARNESS_MAIN