// SPDX-License-Identifier: GPL-2.0-only
/* I/O iterator tests.  This can only test kernel-backed iterator types.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/bvec.h>
#include <linux/folio_queue.h>
#include <kunit/test.h>

MODULE_DESCRIPTION("iov_iter testing");
MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
MODULE_LICENSE("GPL");

struct kvec_test_range {
	int	from, to;
};

static const struct kvec_test_range kvec_test_ranges[] = {
	{ 0x00002, 0x00002 },
	{ 0x00027, 0x03000 },
	{ 0x05193, 0x18794 },
	{ 0x20000, 0x20000 },
	{ 0x20000, 0x24000 },
	{ 0x24000, 0x27001 },
	{ 0x29000, 0xffffb },
	{ 0xffffd, 0xffffe },
	{ -1 }
};

/*
 * Generate a recognisable repeating byte pattern from a buffer offset.
 */
static inline u8 pattern(unsigned long x)
{
	return x & 0xff;
}

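/* Deferred-action callback to unmap the test buffer created below. */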
static void iov_kunit_unmap(void *data)
{
	vunmap(data);
}

/*
 * Create a buffer out of some pages and return a vmap'd pointer to it.
 */
static void *__init iov_kunit_create_buffer(struct kunit *test,
					    struct page ***ppages,
					    size_t npages)
{
	struct page **pages;
	unsigned long got;
	void *buffer;

	pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
	*ppages = pages;

	got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
	if (got != npages) {
		release_pages(pages, got);
		KUNIT_ASSERT_EQ(test, got, npages);
	}

	for (int i = 0; i < npages; i++)
		pages[i]->index = i;

	buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);

	kunit_add_action_or_reset(test, iov_kunit_unmap, buffer);
	return buffer;
}

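/*
 * Build a kvec[] from the test ranges and point a kvec-type iterator at it.
 * At most kvmax segments are loaded; the range list is terminated by a
 * negative "from" value.
 */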
static void __init iov_kunit_load_kvec(struct kunit *test,
				       struct iov_iter *iter, int dir,
				       struct kvec *kvec, unsigned int kvmax,
				       void *buffer, size_t bufsize,
				       const struct kvec_test_range *pr)
{
	size_t size = 0;
	int i;

	for (i = 0; i < kvmax; i++, pr++) {
		if (pr->from < 0)
			break;
		KUNIT_ASSERT_GE(test, pr->to, pr->from);
		KUNIT_ASSERT_LE(test, pr->to, bufsize);
		kvec[i].iov_base = buffer + pr->from;
		kvec[i].iov_len = pr->to - pr->from;
		size += pr->to - pr->from;
	}
	KUNIT_ASSERT_LE(test, size, bufsize);

	iov_iter_kvec(iter, dir, kvec, i, size);
}

/*
 * Test copying to an ITER_KVEC-type iterator.
 */
static void __init iov_kunit_copy_to_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **spages, **bpages;
	struct kvec kvec[8];
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = iter.count;

	copied = copy_to_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the scratch buffer. */
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
		for (i = pr->from; i < pr->to; i++)
			scratch[i] = pattern(patt++);

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

	KUNIT_SUCCEED(test);
}

/*
 * Test copying from an ITER_KVEC-type iterator.
 */
static void __init iov_kunit_copy_from_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **spages, **bpages;
	struct kvec kvec[8];
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_kvec(test, &iter, WRITE, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = min(iter.count, bufsize);

	copied = copy_from_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED(test);
}

struct bvec_test_range {
	int	page, from, to;
};

static const struct bvec_test_range bvec_test_ranges[] = {
	{ 0, 0x0002, 0x0002 },
	{ 1, 0x0027, 0x0893 },
	{ 2, 0x0193, 0x0794 },
	{ 3, 0x0000, 0x1000 },
	{ 4, 0x0000, 0x1000 },
	{ 5, 0x0000, 0x1000 },
	{ 6, 0x0000, 0x0ffb },
	{ 6, 0x0ffd, 0x0ffe },
	{ -1 }
};

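/*
 * Build a bio_vec[] from the test ranges and point a bvec-type iterator at
 * it.  A range that starts at offset 0 on the page contiguous with the end
 * of the previous range is merged into the previous segment, as bio
 * assembly would do.
 */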
static void __init iov_kunit_load_bvec(struct kunit *test,
				       struct iov_iter *iter, int dir,
				       struct bio_vec *bvec, unsigned int bvmax,
				       struct page **pages, size_t npages,
				       size_t bufsize,
				       const struct bvec_test_range *pr)
{
	struct page *can_merge = NULL, *page;
	size_t size = 0;
	int i;

	for (i = 0; i < bvmax; i++, pr++) {
		if (pr->from < 0)
			break;
		KUNIT_ASSERT_LT(test, pr->page, npages);
		KUNIT_ASSERT_LT(test, pr->page * PAGE_SIZE, bufsize);
		KUNIT_ASSERT_GE(test, pr->from, 0);
		KUNIT_ASSERT_GE(test, pr->to, pr->from);
		KUNIT_ASSERT_LE(test, pr->to, PAGE_SIZE);

		page = pages[pr->page];
		if (pr->from == 0 && pr->from != pr->to && page == can_merge) {
			/* Merge this range onto the preceding segment. */
			i--;
			bvec[i].bv_len += pr->to;
		} else {
			bvec_set_page(&bvec[i], page, pr->to - pr->from, pr->from);
		}

		size += pr->to - pr->from;
		if ((pr->to & ~PAGE_MASK) == 0)
			can_merge = page + pr->to / PAGE_SIZE;
		else
			can_merge = NULL;
	}

	iov_iter_bvec(iter, dir, bvec, i, size);
}

/*
 * Test copying to an ITER_BVEC-type iterator.
 */
static void __init iov_kunit_copy_to_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct bio_vec bvec[8];
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, b, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

	copied = copy_to_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the scratch buffer. */
	b = 0;
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = bvec_test_ranges; pr->from >= 0; pr++, b++) {
		u8 *p = scratch + pr->page * PAGE_SIZE;

		for (i = pr->from; i < pr->to; i++)
			p[i] = pattern(patt++);
	}

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

	KUNIT_SUCCEED(test);
}

/*
 * Test copying from an ITER_BVEC-type iterator.
 */
static void __init iov_kunit_copy_from_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct bio_vec bvec[8];
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_bvec(test, &iter, WRITE, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

	copied = copy_from_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = bvec_test_ranges; pr->from >= 0; pr++) {
		size_t patt = pr->page * PAGE_SIZE;

		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(patt + j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED(test);
}

static void iov_kunit_destroy_folioq(void *data)
{
	struct folio_queue *folioq, *next;

	for (folioq = data; folioq; folioq = next) {
		next = folioq->next;
		for (int i = 0; i < folioq_nr_slots(folioq); i++)
			if (folioq_folio(folioq, i))
				folio_put(folioq_folio(folioq, i));
		kfree(folioq);
	}
}

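/*
 * Append one folio per page to the folio_queue, allocating and chaining on
 * a new segment each time the current one fills up, then point a
 * folioq-type iterator at the queue.
 */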
static void __init iov_kunit_load_folioq(struct kunit *test,
					 struct iov_iter *iter, int dir,
					 struct folio_queue *folioq,
					 struct page **pages, size_t npages)
{
	struct folio_queue *p = folioq;
	size_t size = 0;
	int i;

	for (i = 0; i < npages; i++) {
		if (folioq_full(p)) {
			p->next = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
			KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p->next);
			folioq_init(p->next);
			p->next->prev = p;
			p = p->next;
		}
		folioq_append(p, page_folio(pages[i]));
		size += PAGE_SIZE;
	}

	iov_iter_folio_queue(iter, dir, folioq, 0, 0, size);
}

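/* Allocate a folio_queue and schedule its destruction at test end. */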
static struct folio_queue *iov_kunit_create_folioq(struct kunit *test)
{
	struct folio_queue *folioq;

	folioq = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, folioq);
	kunit_add_action_or_reset(test, iov_kunit_destroy_folioq, folioq);
	folioq_init(folioq);
	return folioq;
}

/*
 * Test copying to an ITER_FOLIOQ-type iterator.
 */
static void __init iov_kunit_copy_to_folioq(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct folio_queue *folioq;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	folioq = iov_kunit_create_folioq(test);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);

	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_folio_queue(&iter, READ, folioq, 0, 0, pr->to);
		iov_iter_advance(&iter, pr->from);
		copied = copy_to_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
		i += size;
		if (test->status == KUNIT_FAILURE)
			goto stop;
	}

	/* Build the expected image in the scratch buffer. */
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
		for (i = pr->from; i < pr->to; i++)
			scratch[i] = pattern(patt++);

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

stop:
	KUNIT_SUCCEED(test);
}

/*
 * Test copying from an ITER_FOLIOQ-type iterator.
 */
static void __init iov_kunit_copy_from_folioq(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct folio_queue *folioq;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	folioq = iov_kunit_create_folioq(test);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);

	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
		iov_iter_advance(&iter, pr->from);
		copied = copy_from_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
		i += size;
	}

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED(test);
}

static void iov_kunit_destroy_xarray(void *data)
{
	struct xarray *xarray = data;

	xa_destroy(xarray);
	kfree(xarray);
}

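/*
 * Store the test pages in an xarray and point an xarray-type iterator at
 * them.
 */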
static void __init iov_kunit_load_xarray(struct kunit *test,
					 struct iov_iter *iter, int dir,
					 struct xarray *xarray,
					 struct page **pages, size_t npages)
{
	size_t size = 0;
	int i;

	for (i = 0; i < npages; i++) {
		void *x = xa_store(xarray, i, pages[i], GFP_KERNEL);

		KUNIT_ASSERT_FALSE(test, xa_is_err(x));
		size += PAGE_SIZE;
	}

	iov_iter_xarray(iter, dir, xarray, 0, size);
}

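/* Allocate an xarray and schedule its destruction at test end. */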
static struct xarray *iov_kunit_create_xarray(struct kunit *test)
{
	struct xarray *xarray;

	xarray = kzalloc(sizeof(struct xarray), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xarray);
	xa_init(xarray);
	kunit_add_action_or_reset(test, iov_kunit_destroy_xarray, xarray);
	return xarray;
}

/*
 * Test copying to an ITER_XARRAY-type iterator.
 */
static void __init iov_kunit_copy_to_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, READ, xarray, pr->from, size);
		copied = copy_to_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
		i += size;
	}

	/* Build the expected image in the scratch buffer. */
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
		for (i = pr->from; i < pr->to; i++)
			scratch[i] = pattern(patt++);

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

	KUNIT_SUCCEED(test);
}

/*
 * Test copying from an ITER_XARRAY-type iterator.
 */
static void __init iov_kunit_copy_from_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, WRITE, xarray, pr->from, size);
		copied = copy_from_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
		i += size;
	}

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED(test);
}

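/*
 * The extraction tests below repeatedly pull batches of up to eight pages
 * out of an iterator with iov_iter_extract_pages() and check each returned
 * page pointer and the starting offset against the source ranges.
 */
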
/*
 * Test the extraction of ITER_KVEC-type iterators.
 */
static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	struct kvec kvec[8];
	u8 *buffer;
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	buffer = iov_kunit_create_buffer(test, &bpages, npages);

	iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = iter.count;

	pr = kvec_test_ranges;
	from = pr->from;
	do {
		size_t offset0 = LONG_MAX;

		for (i = 0; i < ARRAY_SIZE(pagelist); i++)
			pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

		len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
					     ARRAY_SIZE(pagelist), 0, &offset0);
		KUNIT_EXPECT_GE(test, len, 0);
		if (len < 0)
			break;
		KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
		KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
		KUNIT_EXPECT_LE(test, len, size);
		KUNIT_EXPECT_EQ(test, iter.count, size - len);
		size -= len;

		if (len == 0)
			break;

		for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
			struct page *p;
			ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
			int ix;

			KUNIT_ASSERT_GE(test, part, 0);
			while (from == pr->to) {
				pr++;
				from = pr->from;
				if (from < 0)
					goto stop;
			}
			ix = from / PAGE_SIZE;
			KUNIT_ASSERT_LT(test, ix, npages);
			p = bpages[ix];
			KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
			KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
			from += part;
			len -= part;
			KUNIT_ASSERT_GE(test, len, 0);
			if (len == 0)
				break;
			offset0 = 0;
		}

		if (test->status == KUNIT_FAILURE)
			break;
	} while (iov_iter_count(&iter) > 0);

stop:
	KUNIT_EXPECT_EQ(test, size, 0);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_SUCCEED(test);
}

/*
 * Test the extraction of ITER_BVEC-type iterators.
 */
static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	struct bio_vec bvec[8];
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	iov_kunit_create_buffer(test, &bpages, npages);
	iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

	pr = bvec_test_ranges;
	from = pr->from;
	do {
		size_t offset0 = LONG_MAX;

		for (i = 0; i < ARRAY_SIZE(pagelist); i++)
			pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

		len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
					     ARRAY_SIZE(pagelist), 0, &offset0);
		KUNIT_EXPECT_GE(test, len, 0);
		if (len < 0)
			break;
		KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
		KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
		KUNIT_EXPECT_LE(test, len, size);
		KUNIT_EXPECT_EQ(test, iter.count, size - len);
		size -= len;

		if (len == 0)
			break;

		for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
			struct page *p;
			ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
			int ix;

			KUNIT_ASSERT_GE(test, part, 0);
			while (from == pr->to) {
				pr++;
				from = pr->from;
				if (from < 0)
					goto stop;
			}
			ix = pr->page + from / PAGE_SIZE;
			KUNIT_ASSERT_LT(test, ix, npages);
			p = bpages[ix];
			KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
			KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
			from += part;
			len -= part;
			KUNIT_ASSERT_GE(test, len, 0);
			if (len == 0)
				break;
			offset0 = 0;
		}

		if (test->status == KUNIT_FAILURE)
			break;
	} while (iov_iter_count(&iter) > 0);

stop:
	KUNIT_EXPECT_EQ(test, size, 0);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_SUCCEED(test);
}

/*
 * Test the extraction of ITER_FOLIOQ-type iterators.
 */
static void __init iov_kunit_extract_pages_folioq(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct folio_queue *folioq;
	struct iov_iter iter;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	folioq = iov_kunit_create_folioq(test);

	iov_kunit_create_buffer(test, &bpages, npages);
	iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);

	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		from = pr->from;
		size = pr->to - from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
		iov_iter_advance(&iter, from);

		do {
			size_t offset0 = LONG_MAX;

			for (i = 0; i < ARRAY_SIZE(pagelist); i++)
				pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

			len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
						     ARRAY_SIZE(pagelist), 0, &offset0);
			KUNIT_EXPECT_GE(test, len, 0);
			if (len < 0)
				break;
			KUNIT_EXPECT_LE(test, len, size);
			KUNIT_EXPECT_EQ(test, iter.count, size - len);
			size -= len;
			if (len == 0)
				break;
			KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
			KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);

			for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
				struct page *p;
				ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
				int ix;

				KUNIT_ASSERT_GE(test, part, 0);
				ix = from / PAGE_SIZE;
				KUNIT_ASSERT_LT(test, ix, npages);
				p = bpages[ix];
				KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
				KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
				from += part;
				len -= part;
				KUNIT_ASSERT_GE(test, len, 0);
				if (len == 0)
					break;
				offset0 = 0;
			}

			if (test->status == KUNIT_FAILURE)
				goto stop;
		} while (iov_iter_count(&iter) > 0);

		KUNIT_EXPECT_EQ(test, size, 0);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
	}

stop:
	KUNIT_SUCCEED(test);
}

/*
 * Test the extraction of ITER_XARRAY-type iterators.
 */
static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	iov_kunit_create_buffer(test, &bpages, npages);
	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		from = pr->from;
		size = pr->to - from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, WRITE, xarray, from, size);

		do {
			size_t offset0 = LONG_MAX;

			for (i = 0; i < ARRAY_SIZE(pagelist); i++)
				pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

			len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
						     ARRAY_SIZE(pagelist), 0, &offset0);
			KUNIT_EXPECT_GE(test, len, 0);
			if (len < 0)
				break;
			KUNIT_EXPECT_LE(test, len, size);
			KUNIT_EXPECT_EQ(test, iter.count, size - len);
			size -= len;
			if (len == 0)
				break;
			KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
			KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);

			for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
				struct page *p;
				ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
				int ix;

				KUNIT_ASSERT_GE(test, part, 0);
				ix = from / PAGE_SIZE;
				KUNIT_ASSERT_LT(test, ix, npages);
				p = bpages[ix];
				KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
				KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
				from += part;
				len -= part;
				KUNIT_ASSERT_GE(test, len, 0);
				if (len == 0)
					break;
				offset0 = 0;
			}

			if (test->status == KUNIT_FAILURE)
				goto stop;
		} while (iov_iter_count(&iter) > 0);

		KUNIT_EXPECT_EQ(test, size, 0);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to - pr->from);
	}

stop:
	KUNIT_SUCCEED(test);
}

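/*
 * Register the tests with KUnit.  Assuming the usual KUnit tooling, one way
 * to run just this suite from the source tree is something like:
 *
 *	./tools/testing/kunit/kunit.py run 'iov_iter*'
 */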
static struct kunit_case __refdata iov_kunit_cases[] = {
	KUNIT_CASE(iov_kunit_copy_to_kvec),
	KUNIT_CASE(iov_kunit_copy_from_kvec),
	KUNIT_CASE(iov_kunit_copy_to_bvec),
	KUNIT_CASE(iov_kunit_copy_from_bvec),
	KUNIT_CASE(iov_kunit_copy_to_folioq),
	KUNIT_CASE(iov_kunit_copy_from_folioq),
	KUNIT_CASE(iov_kunit_copy_to_xarray),
	KUNIT_CASE(iov_kunit_copy_from_xarray),
	KUNIT_CASE(iov_kunit_extract_pages_kvec),
	KUNIT_CASE(iov_kunit_extract_pages_bvec),
	KUNIT_CASE(iov_kunit_extract_pages_folioq),
	KUNIT_CASE(iov_kunit_extract_pages_xarray),
	{}
};

static struct kunit_suite iov_kunit_suite = {
	.name = "iov_iter",
	.test_cases = iov_kunit_cases,
};

kunit_test_suites(&iov_kunit_suite);