/* Block Device Driver Test driver, by D.C. van Moolenbroek */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <minix/blockdriver.h>
#include <minix/drvlib.h>
#include <minix/ds.h>
#include <minix/optset.h>
#include <sys/ioc_disk.h>
#include <sys/mman.h>
#include <assert.h>
enum {
	RESULT_OK,			/* exactly as expected */
	RESULT_DEATH,			/* driver died */
	RESULT_COMMFAIL,		/* communication failed */
	RESULT_BADTYPE,			/* bad type in message */
	RESULT_BADID,			/* bad request ID in message */
	RESULT_BADSTATUS,		/* bad/unexpected status in message */
	RESULT_TRUNC,			/* request truncated unexpectedly */
	RESULT_CORRUPT,			/* buffer touched erroneously */
	RESULT_MISSING,			/* buffer left untouched erroneously */
	RESULT_OVERFLOW,		/* area around buffer touched */
	RESULT_BADVALUE			/* bad/unexpected return value */
};

typedef struct {
	int type;			/* one of the RESULT_* codes above */
	ssize_t value;			/* optional extra value */
} result_t;
static char driver_label[32] = "";	/* driver DS label */
static devminor_t driver_minor = -1;	/* driver's partition minor to use */
static endpoint_t driver_endpt;		/* driver endpoint */

static int may_write = FALSE;		/* may we write to the device? */
static int sector_size = 512;		/* size of a single disk sector */
static int min_read = 512;		/* minimum total size of read req */
static int min_write = 0;		/* minimum total size of write req */
static int element_size = 512;		/* minimum I/O vector element size */
static int max_size = 131072;		/* maximum total size of any req */
/* Note that we do not test exceeding the max_size limit, so it is safe to set
 * it to a value lower than the driver supports.
 */
/* These settings are used for automated test runs. */
static int contig = TRUE;		/* allocate contiguous DMA memory? */
static int silent = FALSE;		/* do not produce console output? */

static struct part_geom part;		/* base and size of target partition */
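/* Only the base and size fields of struct part_geom (64-bit byte offset and
 * length, defined in <minix/partition.h>) are used by the tests below.
 */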
#define NR_OPENED 10			/* maximum number of opened devices */
static dev_t opened[NR_OPENED];		/* list of currently opened devices */
static int nr_opened = 0;		/* current number of opened devices */

static int total_tests = 0;		/* total number of tests performed */
static int failed_tests = 0;		/* number of tests that failed */
static int failed_groups = 0;		/* nr of groups that had failures */
static int group_failure;		/* has this group had a failure yet? */
static int driver_deaths = 0;		/* number of restarts that we saw */
/* Options supported by this driver. */
static struct optset optset_table[] = {
	{ "label",	OPT_STRING,	driver_label,	sizeof(driver_label) },
	{ "minor",	OPT_INT,	&driver_minor,	10 },
	{ "rw",		OPT_BOOL,	&may_write,	TRUE },
	{ "ro",		OPT_BOOL,	&may_write,	FALSE },
	{ "sector",	OPT_INT,	&sector_size,	10 },
	{ "element",	OPT_INT,	&element_size,	10 },
	{ "min_read",	OPT_INT,	&min_read,	10 },
	{ "min_write",	OPT_INT,	&min_write,	10 },
	{ "max",	OPT_INT,	&max_size,	10 },
	{ "nocontig",	OPT_BOOL,	&contig,	FALSE },
	{ "silent",	OPT_BOOL,	&silent,	TRUE },
	{ NULL,		0,		NULL,		0 }
};
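/* This table is handed to optset_parse() together with the option string
 * given to the service. The exact invocation syntax below is an assumption
 * for illustration, not taken from this file; something along the lines of
 *
 *	service up /service/blocktest -args "label=at_wini_0,minor=5,rw"
 *
 * would direct the tests at minor 5 of the driver labeled "at_wini_0" and
 * enable the write tests.
 */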
static void output(char *fmt, ...)
{
	/* Print debugging information, unless configured to be silent.
	 */
	va_list argp;

	if (silent)
		return;

	va_start(argp, fmt);
	vprintf(fmt, argp);
	va_end(argp);
}
static void *alloc_dma_memory(size_t size)
{
	/* Allocate memory that may be used for direct DMA. For most drivers,
	 * this means that the memory has to be physically contiguous. For some
	 * drivers (e.g. VND) we allow non-contiguous allocation, because VM is
	 * currently flaky and does not always manage to provide contiguous
	 * memory even when it should, thus causing needless test failures.
	 */
	void *ptr;

	if (contig)
		ptr = alloc_contig(size, 0, NULL);
	else
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			MAP_PREALLOC | MAP_ANON, -1, 0);

	if (ptr == MAP_FAILED)
		panic("unable to allocate %zu bytes of memory", size);

	return ptr;
}
static void free_dma_memory(void *ptr, size_t size)
{
	/* Free memory previously allocated for direct DMA. */
	if (contig)
		free_contig(ptr, size);
	else
		munmap(ptr, size);
}
static int set_result(result_t *res, int type, ssize_t value)
{
	/* Set the result to the given result type and with the given optional
	 * extra value. Return the type.
	 */
	res->type = type;
	res->value = value;

	return type;
}
static int accept_result(result_t *res, int type, ssize_t value)
{
	/* If the result is of the given type and value, reset it to a success
	 * result. This allows for a logical OR on error codes. Return whether
	 * the result was indeed reset.
	 */
	if (res->type == type && res->value == value) {
		set_result(res, RESULT_OK, 0);

		return TRUE;
	}

	return FALSE;
}
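/* Typical use, as seen in several tests below: after expecting EINVAL from a
 * request, accept_result(&res, RESULT_BADSTATUS, EPERM) turns an EPERM reply
 * into a pass as well, so that either error code is accepted.
 */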
static void got_result(result_t *res, char *desc)
{
	/* Process the result of a test. Keep statistics.
	 */
	static int i = 0;

	total_tests++;

	if (res->type != RESULT_OK) {
		failed_tests++;

		if (group_failure == FALSE) {
			failed_groups++;
			group_failure = TRUE;
		}
	}

	output("#%02d: %-38s\t[%s]\n", ++i, desc,
		(res->type == RESULT_OK) ? "PASS" : "FAIL");

	switch (res->type) {
	case RESULT_DEATH:
		output("- driver died\n");
		break;
	case RESULT_COMMFAIL:
		output("- communication failed; ipc_sendrec returned %d\n",
			res->value);
		break;
	case RESULT_BADTYPE:
		output("- bad type %d in reply message\n", res->value);
		break;
	case RESULT_BADID:
		output("- mismatched ID %d in reply message\n", res->value);
		break;
	case RESULT_BADSTATUS:
		output("- bad or unexpected status %d in reply message\n",
			res->value);
		break;
	case RESULT_TRUNC:
		output("- result size not as expected (%u bytes left)\n",
			res->value);
		break;
	case RESULT_CORRUPT:
		output("- buffer has been modified erroneously\n");
		break;
	case RESULT_MISSING:
		output("- buffer has been left untouched erroneously\n");
		break;
	case RESULT_OVERFLOW:
		output("- area around target buffer modified\n");
		break;
	case RESULT_BADVALUE:
		output("- bad or unexpected return value %d from call\n",
			res->value);
		break;
	}
}
static void test_group(char *name, int exec)
{
	/* Start a new group of tests.
	 */
	output("Test group: %s%s\n", name, exec ? "" : " (skipping)");

	group_failure = FALSE;
}
static void reopen_device(dev_t minor)
{
	/* Reopen a device after we were notified that the driver has died.
	 * Explicitly ignore any errors here; this is a feeble attempt to get
	 * ourselves back into business again.
	 */
	message m;

	memset(&m, 0, sizeof(m));
	m.m_type = BDEV_OPEN;
	m.m_lbdev_lblockdriver_msg.minor = minor;
	m.m_lbdev_lblockdriver_msg.access =
		(may_write) ? (BDEV_R_BIT | BDEV_W_BIT) : BDEV_R_BIT;
	m.m_lbdev_lblockdriver_msg.id = 0;

	(void) ipc_sendrec(driver_endpt, &m);
}
static int sendrec_driver(message *m_ptr, ssize_t exp, result_t *res)
{
	/* Make a call to the driver, and perform basic checks on the return
	 * message. Fill in the result structure, wiping out what was in there
	 * before. If the driver dies in the process, attempt to recover but
	 * fail the request.
	 */
	message m_orig;
	endpoint_t last_endpt;
	int i, r;

	m_orig = *m_ptr;

	r = ipc_sendrec(driver_endpt, m_ptr);

	if (r == EDEADSRCDST) {
		/* The driver has died. Find its new endpoint, and reopen all
		 * devices that we opened earlier. Then return failure.
		 */
		output("WARNING: driver has died, attempting to proceed\n");

		driver_deaths++;

		/* Keep trying until we get a new endpoint. */
		last_endpt = driver_endpt;
		for (;;) {
			r = ds_retrieve_label_endpt(driver_label,
				&driver_endpt);

			if (r == OK && last_endpt != driver_endpt)
				break;

			micro_delay(100000);
		}

		for (i = 0; i < nr_opened; i++)
			reopen_device(opened[i]);

		return set_result(res, RESULT_DEATH, 0);
	}

	if (r != OK)
		return set_result(res, RESULT_COMMFAIL, r);

	if (m_ptr->m_type != BDEV_REPLY)
		return set_result(res, RESULT_BADTYPE, m_ptr->m_type);

	if (m_ptr->m_lblockdriver_lbdev_reply.id !=
			m_orig.m_lbdev_lblockdriver_msg.id)
		return set_result(res, RESULT_BADID,
			m_ptr->m_lblockdriver_lbdev_reply.id);

	if ((exp < 0 && m_ptr->m_lblockdriver_lbdev_reply.status >= 0) ||
			(exp >= 0 &&
			m_ptr->m_lblockdriver_lbdev_reply.status < 0))
		return set_result(res, RESULT_BADSTATUS,
			m_ptr->m_lblockdriver_lbdev_reply.status);

	return set_result(res, RESULT_OK, 0);
}
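/* All test requests below funnel through sendrec_driver(): the caller builds
 * a BDEV_* request, and the reply is checked for the right message type
 * (BDEV_REPLY), the echoed request ID, and a status whose sign matches the
 * caller's expectation; exact status values are verified by the callers.
 */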
static void raw_xfer(dev_t minor, u64_t pos, iovec_s_t *iovec, int nr_req,
	int write, ssize_t exp, result_t *res)
{
	/* Perform a transfer with a safecopy iovec already supplied.
	 */
	cp_grant_id_t grant;
	message m;
	int r;

	assert(nr_req <= NR_IOREQS);
	assert(!write || may_write);

	if ((grant = cpf_grant_direct(driver_endpt, (vir_bytes) iovec,
			sizeof(*iovec) * nr_req, CPF_READ)) == GRANT_INVALID)
		panic("unable to allocate grant");

	memset(&m, 0, sizeof(m));
	m.m_type = write ? BDEV_SCATTER : BDEV_GATHER;
	m.m_lbdev_lblockdriver_msg.minor = minor;
	m.m_lbdev_lblockdriver_msg.pos = pos;
	m.m_lbdev_lblockdriver_msg.count = nr_req;
	m.m_lbdev_lblockdriver_msg.grant = grant;
	m.m_lbdev_lblockdriver_msg.id = lrand48();

	r = sendrec_driver(&m, exp, res);

	if (cpf_revoke(grant) == -1)
		panic("unable to revoke grant");

	if (r != RESULT_OK)
		return;

	if (m.m_lblockdriver_lbdev_reply.status == exp)
		return;

	if (exp < 0)
		set_result(res, RESULT_BADSTATUS,
			m.m_lblockdriver_lbdev_reply.status);
	else
		set_result(res, RESULT_TRUNC,
			exp - m.m_lblockdriver_lbdev_reply.status);
}
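/* Note on direction: BDEV_SCATTER writes to the device, scattering from the
 * I/O vector's buffers, while BDEV_GATHER reads from the device, gathering
 * into those buffers. Hence the grant permissions in vir_xfer() below are
 * CPF_READ for writes and CPF_WRITE for reads.
 */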
static void vir_xfer(dev_t minor, u64_t pos, iovec_t *iovec, int nr_req,
	int write, ssize_t exp, result_t *res)
{
	/* Perform a transfer, creating and revoking grants for the I/O vector.
	 */
	iovec_s_t iov_s[NR_IOREQS];
	int i;

	assert(nr_req <= NR_IOREQS);

	for (i = 0; i < nr_req; i++) {
		iov_s[i].iov_size = iovec[i].iov_size;

		if ((iov_s[i].iov_grant = cpf_grant_direct(driver_endpt,
				(vir_bytes) iovec[i].iov_addr,
				iovec[i].iov_size,
				write ? CPF_READ : CPF_WRITE)) ==
				GRANT_INVALID)
			panic("unable to allocate grant");
	}

	raw_xfer(minor, pos, iov_s, nr_req, write, exp, res);

	for (i = 0; i < nr_req; i++) {
		iovec[i].iov_size = iov_s[i].iov_size;

		if (cpf_revoke(iov_s[i].iov_grant) == -1)
			panic("unable to revoke grant");
	}
}
static void simple_xfer(dev_t minor, u64_t pos, u8_t *buf, size_t size,
	int write, ssize_t exp, result_t *res)
{
	/* Perform a transfer involving a single buffer.
	 */
	iovec_t iov;

	iov.iov_addr = (vir_bytes) buf;
	iov.iov_size = size;

	vir_xfer(minor, pos, &iov, 1, write, exp, res);
}
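/* The three transfer helpers form a stack: simple_xfer() wraps one buffer
 * into a single-element I/O vector for vir_xfer(), which converts virtual
 * addresses into safecopy grants for raw_xfer(), which performs the actual
 * message exchange with the driver.
 */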
static void alloc_buf_and_grant(u8_t **ptr, cp_grant_id_t *grant,
	size_t size, int perms)
{
	/* Allocate a buffer suitable for DMA (i.e. contiguous) and create a
	 * grant for it with the requested CPF_* grant permissions.
	 */
	*ptr = alloc_dma_memory(size);

	if ((*grant = cpf_grant_direct(driver_endpt, (vir_bytes) *ptr, size,
			perms)) == GRANT_INVALID)
		panic("unable to allocate grant");
}
static void free_buf_and_grant(u8_t *ptr, cp_grant_id_t grant, size_t size)
{
	/* Revoke a grant and free a buffer.
	 */
	cpf_revoke(grant);

	free_dma_memory(ptr, size);
}
static void bad_read1(void)
{
	/* Test various illegal read transfer requests, part 1.
	 */
	message m, mt;
	iovec_s_t iov, iovt;
	cp_grant_id_t grant, grant2, grant3;
	u8_t *buf_ptr;
	size_t buf_size;
	result_t res;

	test_group("bad read requests, part one", TRUE);

#define BUF_SIZE 4096
	buf_size = BUF_SIZE;

	alloc_buf_and_grant(&buf_ptr, &grant2, buf_size, CPF_WRITE);

	if ((grant = cpf_grant_direct(driver_endpt, (vir_bytes) &iov,
			sizeof(iov), CPF_READ)) == GRANT_INVALID)
		panic("unable to allocate grant");

	/* Initialize the defaults for some of the tests.
	 * This is a legitimate request for the first block of the partition.
	 */
	memset(&mt, 0, sizeof(mt));
	mt.m_type = BDEV_GATHER;
	mt.m_lbdev_lblockdriver_msg.minor = driver_minor;
	mt.m_lbdev_lblockdriver_msg.pos = 0LL;
	mt.m_lbdev_lblockdriver_msg.count = 1;
	mt.m_lbdev_lblockdriver_msg.grant = grant;
	mt.m_lbdev_lblockdriver_msg.id = lrand48();

	memset(&iovt, 0, sizeof(iovt));
	iovt.iov_grant = grant2;
	iovt.iov_size = buf_size;

	/* Test normal request. */
	m = mt;
	iov = iovt;

	sendrec_driver(&m, OK, &res);

	if (res.type == RESULT_OK &&
		m.m_lblockdriver_lbdev_reply.status != (ssize_t) iov.iov_size) {
		res.type = RESULT_TRUNC;
		res.value = m.m_lblockdriver_lbdev_reply.status;
	}

	got_result(&res, "normal request");

	/* Test zero iovec elements. */
	m = mt;
	iov = iovt;

	m.m_lbdev_lblockdriver_msg.count = 0;

	sendrec_driver(&m, EINVAL, &res);

	got_result(&res, "zero iovec elements");

	/* Test bad iovec grant. */
	m = mt;

	m.m_lbdev_lblockdriver_msg.grant = GRANT_INVALID;

	sendrec_driver(&m, EINVAL, &res);

	got_result(&res, "bad iovec grant");

	/* Test revoked iovec grant. */
	m = mt;
	iov = iovt;

	if ((grant3 = cpf_grant_direct(driver_endpt, (vir_bytes) &iov,
			sizeof(iov), CPF_READ)) == GRANT_INVALID)
		panic("unable to allocate grant");

	cpf_revoke(grant3);

	m.m_lbdev_lblockdriver_msg.grant = grant3;

	sendrec_driver(&m, EINVAL, &res);

	accept_result(&res, RESULT_BADSTATUS, EPERM);

	got_result(&res, "revoked iovec grant");

	/* Test normal request (final check). */
	m = mt;
	iov = iovt;

	sendrec_driver(&m, OK, &res);

	if (res.type == RESULT_OK &&
		m.m_lblockdriver_lbdev_reply.status != (ssize_t) iov.iov_size) {
		res.type = RESULT_TRUNC;
		res.value = m.m_lblockdriver_lbdev_reply.status;
	}

	got_result(&res, "normal request");

	/* Clean up. */
	free_buf_and_grant(buf_ptr, grant2, buf_size);

	cpf_revoke(grant);
}
static u32_t get_sum(u8_t *ptr, size_t size)
{
	/* Compute a checksum over the given buffer.
	 */
	u32_t sum;

	for (sum = 0; size > 0; size--, ptr++)
		sum = sum ^ (sum << 5) ^ *ptr;

	return sum;
}
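/* A worked example of the checksum (not used by the tests themselves): for
 * the two bytes 0x01, 0x02 the loop computes sum = 0^0^0x01 = 0x01, then
 * sum = 0x01^0x20^0x02 = 0x23; with the bytes swapped it yields 0x43. The
 * checksum is thus sensitive to byte order as well as content, which is
 * what the corruption checks below depend on.
 */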
static u32_t fill_rand(u8_t *ptr, size_t size)
{
	/* Fill the given buffer with random data. Return a checksum over the
	 * resulting data.
	 */
	size_t i;

	for (i = 0; i < size; i++)
		ptr[i] = lrand48() % 256;

	return get_sum(ptr, size);
}
static void test_sum(u8_t *ptr, size_t size, u32_t sum, int should_match,
	result_t *res)
{
	/* If the test succeeded so far, check whether the given buffer does
	 * or does not match the given checksum, and adjust the test result
	 * accordingly.
	 */
	u32_t sum2;

	if (res->type != RESULT_OK)
		return;

	sum2 = get_sum(ptr, size);

	if ((sum == sum2) != should_match) {
		res->type = should_match ? RESULT_CORRUPT : RESULT_MISSING;
		res->value = 0;		/* not much that's useful here */
	}
}
static void bad_read2(void)
{
	/* Test various illegal read transfer requests, part 2.
	 *
	 * Consider allowing this test to be run twice, with different buffer
	 * sizes. It appears that we can make at_wini misbehave by making the
	 * size exceed the per-operation size (128KB ?). On the other hand, we
	 * then need to start checking partition sizes, possibly.
	 */
	u8_t *buf_ptr, *buf2_ptr, *buf3_ptr, c1, c2;
	size_t buf_size, buf2_size, buf3_size;
	cp_grant_id_t buf_grant, buf2_grant, buf3_grant, grant;
	u32_t buf_sum, buf2_sum, buf3_sum;
	iovec_s_t iov[3], iovt[3];
	result_t res;

	test_group("bad read requests, part two", TRUE);

	buf_size = buf2_size = buf3_size = BUF_SIZE;

	alloc_buf_and_grant(&buf_ptr, &buf_grant, buf_size, CPF_WRITE);
	alloc_buf_and_grant(&buf2_ptr, &buf2_grant, buf2_size, CPF_WRITE);
	alloc_buf_and_grant(&buf3_ptr, &buf3_grant, buf3_size, CPF_WRITE);

	iovt[0].iov_grant = buf_grant;
	iovt[0].iov_size = buf_size;
	iovt[1].iov_grant = buf2_grant;
	iovt[1].iov_size = buf2_size;
	iovt[2].iov_grant = buf3_grant;
	iovt[2].iov_size = buf3_size;

	/* Test normal vector request. */
	memcpy(iov, iovt, sizeof(iovt));

	buf_sum = fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE,
		buf_size + buf2_size + buf3_size, &res);

	test_sum(buf_ptr, buf_size, buf_sum, FALSE, &res);
	test_sum(buf2_ptr, buf2_size, buf2_sum, FALSE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, FALSE, &res);

	got_result(&res, "normal vector request");

	/* Test zero sized iovec element. */
	memcpy(iov, iovt, sizeof(iovt));
	iov[1].iov_size = 0;

	buf_sum = fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, EINVAL, &res);

	test_sum(buf_ptr, buf_size, buf_sum, TRUE, &res);
	test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

	got_result(&res, "zero size in iovec element");

	/* Test negative sized iovec element. */
	memcpy(iov, iovt, sizeof(iovt));
	iov[1].iov_size = (vir_bytes) LONG_MAX + 1;

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, EINVAL, &res);

	test_sum(buf_ptr, buf_size, buf_sum, TRUE, &res);
	test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

	got_result(&res, "negative size in iovec element");

	/* Test iovec with negative total size. */
	memcpy(iov, iovt, sizeof(iovt));
	iov[0].iov_size = LONG_MAX / 2 - 1;
	iov[1].iov_size = LONG_MAX / 2 - 1;

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, EINVAL, &res);

	test_sum(buf_ptr, buf_size, buf_sum, TRUE, &res);
	test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

	got_result(&res, "negative total size");

	/* Test iovec with wrapping total size. */
	memcpy(iov, iovt, sizeof(iovt));
	iov[0].iov_size = LONG_MAX - 1;
	iov[1].iov_size = LONG_MAX - 1;

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, EINVAL, &res);

	test_sum(buf_ptr, buf_size, buf_sum, TRUE, &res);
	test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

	got_result(&res, "wrapping total size");

	/* Test word-unaligned iovec element size. */
	memcpy(iov, iovt, sizeof(iovt));
	iov[1].iov_size--;

	buf_sum = fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);
	c1 = buf2_ptr[buf2_size - 1];

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, BUF_SIZE * 3 - 1,
		&res);

	if (accept_result(&res, RESULT_BADSTATUS, EINVAL)) {
		/* Do not test the first buffer, as it may contain a partial
		 * result.
		 */
		test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
		test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);
	} else {
		test_sum(buf_ptr, buf_size, buf_sum, FALSE, &res);
		test_sum(buf2_ptr, buf2_size, buf2_sum, FALSE, &res);
		test_sum(buf3_ptr, buf3_size, buf3_sum, FALSE, &res);
		if (c1 != buf2_ptr[buf2_size - 1])
			set_result(&res, RESULT_CORRUPT, 0);
	}

	got_result(&res, "word-unaligned size in iovec element");

	/* Test invalid grant in iovec element. */
	memcpy(iov, iovt, sizeof(iovt));
	iov[1].iov_grant = GRANT_INVALID;

	fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, EINVAL, &res);

	/* Do not test the first buffer, as it may contain a partial result. */
	test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

	got_result(&res, "invalid grant in iovec element");

	/* Test revoked grant in iovec element. */
	memcpy(iov, iovt, sizeof(iovt));
	if ((grant = cpf_grant_direct(driver_endpt, (vir_bytes) buf2_ptr,
			buf2_size, CPF_WRITE)) == GRANT_INVALID)
		panic("unable to allocate grant");

	cpf_revoke(grant);

	iov[1].iov_grant = grant;

	buf_sum = fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, EINVAL, &res);

	accept_result(&res, RESULT_BADSTATUS, EPERM);

	/* Do not test the first buffer, as it may contain a partial result. */
	test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

	got_result(&res, "revoked grant in iovec element");

	/* Test read-only grant in iovec element. */
	memcpy(iov, iovt, sizeof(iovt));
	if ((grant = cpf_grant_direct(driver_endpt, (vir_bytes) buf2_ptr,
			buf2_size, CPF_READ)) == GRANT_INVALID)
		panic("unable to allocate grant");

	iov[1].iov_grant = grant;

	buf_sum = fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, EINVAL, &res);

	accept_result(&res, RESULT_BADSTATUS, EPERM);

	/* Do not test the first buffer, as it may contain a partial result. */
	test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

	got_result(&res, "read-only grant in iovec element");

	cpf_revoke(grant);

	/* Test word-unaligned iovec element buffer. */
	memcpy(iov, iovt, sizeof(iovt));
	if ((grant = cpf_grant_direct(driver_endpt, (vir_bytes) (buf2_ptr + 1),
			buf2_size - 2, CPF_WRITE)) == GRANT_INVALID)
		panic("unable to allocate grant");

	iov[1].iov_grant = grant;
	iov[1].iov_size = buf2_size - 2;

	buf_sum = fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);
	c1 = buf2_ptr[0];
	c2 = buf2_ptr[buf2_size - 1];

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE, BUF_SIZE * 3 - 2, &res);

	if (accept_result(&res, RESULT_BADSTATUS, EINVAL)) {
		/* Do not test the first buffer, as it may contain a partial
		 * result.
		 */
		test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
		test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);
	} else {
		test_sum(buf_ptr, buf_size, buf_sum, FALSE, &res);
		test_sum(buf2_ptr, buf2_size, buf2_sum, FALSE, &res);
		test_sum(buf3_ptr, buf3_size, buf3_sum, FALSE, &res);
		if (c1 != buf2_ptr[0] || c2 != buf2_ptr[buf2_size - 1])
			set_result(&res, RESULT_CORRUPT, 0);
	}

	got_result(&res, "word-unaligned buffer in iovec element");

	cpf_revoke(grant);

	/* Test word-unaligned position. */
	/* Only perform this test if the minimum read size is not 1, in which
	 * case it is safe to assume that the driver expects no position
	 * alignment either. These tests are indeed not exhaustive yet. For now
	 * we assume that if no alignment is required at all, the driver does
	 * not implement special logic to achieve this, so we don't need to
	 * test all possible positions and sizes either (yes, laziness..).
	 */
	if (min_read > 1) {
		memcpy(iov, iovt, sizeof(iovt));

		buf_sum = fill_rand(buf_ptr, buf_size);
		buf2_sum = fill_rand(buf2_ptr, buf2_size);
		buf3_sum = fill_rand(buf3_ptr, buf3_size);

		raw_xfer(driver_minor, 1ULL, iov, 3, FALSE, EINVAL, &res);

		test_sum(buf_ptr, buf_size, buf_sum, TRUE, &res);
		test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
		test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

		got_result(&res, "word-unaligned position");
	}

	/* Test normal vector request (final check). */
	memcpy(iov, iovt, sizeof(iovt));

	buf_sum = fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);

	raw_xfer(driver_minor, 0ULL, iov, 3, FALSE,
		buf_size + buf2_size + buf3_size, &res);

	test_sum(buf_ptr, buf_size, buf_sum, FALSE, &res);
	test_sum(buf2_ptr, buf2_size, buf2_sum, FALSE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, FALSE, &res);

	got_result(&res, "normal vector request");

	/* Clean up. */
	free_buf_and_grant(buf3_ptr, buf3_grant, buf3_size);
	free_buf_and_grant(buf2_ptr, buf2_grant, buf2_size);
	free_buf_and_grant(buf_ptr, buf_grant, buf_size);
}
static void bad_write(void)
{
	/* Test various illegal write transfer requests, if writing is allowed.
	 * If handled correctly, these requests will not actually write data.
	 * This part of the test set is in need of further expansion.
	 */
	u8_t *buf_ptr, *buf2_ptr, *buf3_ptr;
	size_t buf_size, buf2_size, buf3_size, sector_unalign;
	cp_grant_id_t buf_grant, buf2_grant, buf3_grant;
	cp_grant_id_t grant;
	u32_t buf_sum, buf2_sum, buf3_sum;
	iovec_s_t iov[3], iovt[3];
	result_t res;

	test_group("bad write requests", may_write);

	if (!may_write)
		return;

	buf_size = buf2_size = buf3_size = BUF_SIZE;

	alloc_buf_and_grant(&buf_ptr, &buf_grant, buf_size, CPF_READ);
	alloc_buf_and_grant(&buf2_ptr, &buf2_grant, buf2_size, CPF_READ);
	alloc_buf_and_grant(&buf3_ptr, &buf3_grant, buf3_size, CPF_READ);

	iovt[0].iov_grant = buf_grant;
	iovt[0].iov_size = buf_size;
	iovt[1].iov_grant = buf2_grant;
	iovt[1].iov_size = buf2_size;
	iovt[2].iov_grant = buf3_grant;
	iovt[2].iov_size = buf3_size;

	/* Only perform write alignment tests if writes require alignment. */
	if (min_write == 0)
		min_write = sector_size;

	if (min_write > 1) {
		/* If min_write is larger than 2, use 2 as sector-unaligned
		 * size, as word-unaligned values (e.g., 1) may be filtered out
		 * on another code path.
		 */
		sector_unalign = (min_write > 2) ? 2 : 1;

		/* Test sector-unaligned write position. */
		memcpy(iov, iovt, sizeof(iovt));

		buf_sum = fill_rand(buf_ptr, buf_size);
		buf2_sum = fill_rand(buf2_ptr, buf2_size);
		buf3_sum = fill_rand(buf3_ptr, buf3_size);

		raw_xfer(driver_minor, (u64_t)sector_unalign, iov, 3, TRUE,
			EINVAL, &res);

		test_sum(buf_ptr, buf_size, buf_sum, TRUE, &res);
		test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
		test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

		got_result(&res, "sector-unaligned write position");

		/* Test sector-unaligned write size. */
		memcpy(iov, iovt, sizeof(iovt));
		iov[1].iov_size -= sector_unalign;

		buf_sum = fill_rand(buf_ptr, buf_size);
		buf2_sum = fill_rand(buf2_ptr, buf2_size);
		buf3_sum = fill_rand(buf3_ptr, buf3_size);

		raw_xfer(driver_minor, 0ULL, iov, 3, TRUE, EINVAL, &res);

		test_sum(buf_ptr, buf_size, buf_sum, TRUE, &res);
		test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
		test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

		got_result(&res, "sector-unaligned write size");
	}

	/* Test write-only grant in iovec element. */
	memcpy(iov, iovt, sizeof(iovt));
	if ((grant = cpf_grant_direct(driver_endpt, (vir_bytes) buf2_ptr,
			buf2_size, CPF_WRITE)) == GRANT_INVALID)
		panic("unable to allocate grant");

	iov[1].iov_grant = grant;

	buf_sum = fill_rand(buf_ptr, buf_size);
	buf2_sum = fill_rand(buf2_ptr, buf2_size);
	buf3_sum = fill_rand(buf3_ptr, buf3_size);

	raw_xfer(driver_minor, 0ULL, iov, 3, TRUE, EINVAL, &res);

	accept_result(&res, RESULT_BADSTATUS, EPERM);

	test_sum(buf_ptr, buf_size, buf_sum, TRUE, &res);
	test_sum(buf2_ptr, buf2_size, buf2_sum, TRUE, &res);
	test_sum(buf3_ptr, buf3_size, buf3_sum, TRUE, &res);

	got_result(&res, "write-only grant in iovec element");

	cpf_revoke(grant);

	/* Clean up. */
	free_buf_and_grant(buf3_ptr, buf3_grant, buf3_size);
	free_buf_and_grant(buf2_ptr, buf2_grant, buf2_size);
	free_buf_and_grant(buf_ptr, buf_grant, buf_size);
}
static void vector_and_large_sub(size_t small_size)
{
	/* Check whether large vectored requests, and large single requests,
	 * succeed.
	 */
	size_t large_size, buf_size, buf2_size;
	u8_t *buf_ptr, *buf2_ptr;
	iovec_t iovec[NR_IOREQS];
	u64_t base_pos;
	result_t res;
	int i;

	base_pos = (u64_t)sector_size;

	large_size = small_size * NR_IOREQS;

	buf_size = large_size + sizeof(u32_t) * 2;
	buf2_size = large_size + sizeof(u32_t) * (NR_IOREQS + 1);

	buf_ptr = alloc_dma_memory(buf_size);
	buf2_ptr = alloc_dma_memory(buf2_size);

	/* The first buffer has one large chunk with dword-sized guards on each
	 * side. LPTR(n) points to the start of the nth small data chunk within
	 * the large chunk. The second buffer contains several small chunks. It
	 * has dword-sized guards before each chunk and after the last chunk.
	 * SPTR(n) points to the start of the nth small chunk.
	 */
#define SPTR(n) (buf2_ptr + sizeof(u32_t) + (n) * (sizeof(u32_t) + small_size))
#define LPTR(n) (buf_ptr + sizeof(u32_t) + small_size * (n))
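	/* Layout sketch (G = one u32_t guard word), following the sizes
	 * computed above, with N = NR_IOREQS:
	 *
	 *	buf_ptr : [G][ chunk 0 | chunk 1 | ... | chunk N-1 ][G]
	 *	buf2_ptr: [G][ chunk 0 ][G][ chunk 1 ][G] ... [ chunk N-1 ][G]
	 *
	 * LPTR(n) addresses chunk n in the first buffer, SPTR(n) in the
	 * second.
	 */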
	/* Write one large chunk, if writing is allowed. */
	if (may_write) {
		fill_rand(buf_ptr, buf_size);	/* don't need the checksum */

		iovec[0].iov_addr = (vir_bytes) (buf_ptr + sizeof(u32_t));
		iovec[0].iov_size = large_size;

		vir_xfer(driver_minor, base_pos, iovec, 1, TRUE, large_size,
			&res);

		got_result(&res, "large write");
	}

	/* Read back in many small chunks. If writing is not allowed, do not
	 * check the read data against the previously written contents.
	 */
	for (i = 0; i < NR_IOREQS; i++) {
		* (((u32_t *) SPTR(i)) - 1) = 0xDEADBEEFL + i;
		iovec[i].iov_addr = (vir_bytes) SPTR(i);
		iovec[i].iov_size = small_size;
	}
	* (((u32_t *) SPTR(i)) - 1) = 0xFEEDFACEL;

	vir_xfer(driver_minor, base_pos, iovec, NR_IOREQS, FALSE, large_size,
		&res);

	if (res.type == RESULT_OK) {
		for (i = 0; i < NR_IOREQS; i++) {
			if (* (((u32_t *) SPTR(i)) - 1) != 0xDEADBEEFL + i)
				set_result(&res, RESULT_OVERFLOW, 0);
		}

		if (* (((u32_t *) SPTR(i)) - 1) != 0xFEEDFACEL)
			set_result(&res, RESULT_OVERFLOW, 0);
	}

	if (res.type == RESULT_OK && may_write) {
		for (i = 0; i < NR_IOREQS; i++) {
			test_sum(SPTR(i), small_size,
				get_sum(LPTR(i), small_size), TRUE, &res);
		}
	}

	got_result(&res, "vectored read");

	/* Write new data in many small chunks, if writing is allowed. */
	if (may_write) {
		fill_rand(buf2_ptr, buf2_size);	/* don't need the checksum */

		for (i = 0; i < NR_IOREQS; i++) {
			iovec[i].iov_addr = (vir_bytes) SPTR(i);
			iovec[i].iov_size = small_size;
		}

		vir_xfer(driver_minor, base_pos, iovec, NR_IOREQS, TRUE,
			large_size, &res);

		got_result(&res, "vectored write");
	}

	/* Read back in one large chunk. If writing is allowed, the checksums
	 * must match the last write; otherwise, they must match the last read.
	 * In both cases, the expected content is in the second buffer.
	 */
	* (u32_t *) buf_ptr = 0xCAFEBABEL;
	* (u32_t *) (buf_ptr + sizeof(u32_t) + large_size) = 0xDECAFBADL;

	iovec[0].iov_addr = (vir_bytes) (buf_ptr + sizeof(u32_t));
	iovec[0].iov_size = large_size;

	vir_xfer(driver_minor, base_pos, iovec, 1, FALSE, large_size, &res);

	if (res.type == RESULT_OK) {
		if (* (u32_t *) buf_ptr != 0xCAFEBABEL)
			set_result(&res, RESULT_OVERFLOW, 0);
		if (* (u32_t *) (buf_ptr + sizeof(u32_t) + large_size) !=
				0xDECAFBADL)
			set_result(&res, RESULT_OVERFLOW, 0);
	}

	if (res.type == RESULT_OK) {
		for (i = 0; i < NR_IOREQS; i++) {
			test_sum(SPTR(i), small_size,
				get_sum(LPTR(i), small_size), TRUE, &res);
		}
	}

	got_result(&res, "large read");

	/* Clean up. */
	free_dma_memory(buf2_ptr, buf2_size);
	free_dma_memory(buf_ptr, buf_size);
}
static void vector_and_large(void)
{
	/* Check whether large vectored requests, and large single requests,
	 * succeed. These are request patterns commonly used by MFS and the
	 * filter driver, respectively. We try the same test twice: once with
	 * a common block size, and once to push against the max request size.
	 */
	size_t max_block;

	/* Make sure that the maximum size does not exceed the target device
	 * size, minus the margins we need for testing here and there.
	 */
	if (max_size > part.size - sector_size * 4)
		max_size = part.size - sector_size * 4;

	/* Compute the largest sector multiple which, when multiplied by
	 * NR_IOREQS, is no more than the maximum transfer size. Note that if
	 * max_size is not a multiple of sector_size, we're not going up to the
	 * limit entirely this way.
	 */
	max_block = max_size / NR_IOREQS;
	max_block -= max_block % sector_size;
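	/* For example, with the default max_size of 131072 bytes, 512-byte
	 * sectors, and NR_IOREQS elements per vector (64 is assumed here;
	 * the actual value comes from the system headers), this yields
	 * max_block = 2048, which is already a sector multiple.
	 */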
#define COMMON_BLOCK_SIZE	4096

	test_group("vector and large, common block", TRUE);

	vector_and_large_sub(COMMON_BLOCK_SIZE);

	if (max_block != COMMON_BLOCK_SIZE) {
		test_group("vector and large, large block", TRUE);

		vector_and_large_sub(max_block);
	}
}
static void open_device(dev_t minor)
{
	/* Open a partition or subpartition. Remember that it has been opened,
	 * so that we can reopen it later in the event of a driver crash.
	 */
	message m;
	result_t res;

	memset(&m, 0, sizeof(m));
	m.m_type = BDEV_OPEN;
	m.m_lbdev_lblockdriver_msg.minor = minor;
	m.m_lbdev_lblockdriver_msg.access =
		may_write ? (BDEV_R_BIT | BDEV_W_BIT) : BDEV_R_BIT;
	m.m_lbdev_lblockdriver_msg.id = lrand48();

	sendrec_driver(&m, OK, &res);

	/* We assume that this call is supposed to succeed. We pretend it
	 * always succeeds, so that close_device() won't get confused later.
	 */
	assert(nr_opened < NR_OPENED);
	opened[nr_opened++] = minor;

	got_result(&res, minor == driver_minor ? "opening the main partition" :
		"opening a subpartition");
}
static void close_device(dev_t minor)
{
	/* Close a partition or subpartition. Remove it from the list of opened
	 * devices.
	 */
	message m;
	result_t res;
	int i;

	memset(&m, 0, sizeof(m));
	m.m_type = BDEV_CLOSE;
	m.m_lbdev_lblockdriver_msg.minor = minor;
	m.m_lbdev_lblockdriver_msg.id = lrand48();

	sendrec_driver(&m, OK, &res);

	assert(nr_opened > 0);
	for (i = 0; i < nr_opened; i++) {
		if (opened[i] == minor) {
			/* Swap in the last entry; order does not matter. */
			opened[i] = opened[--nr_opened];
			break;
		}
	}

	got_result(&res, minor == driver_minor ? "closing the main partition" :
		"closing a subpartition");
}
static int vir_ioctl(dev_t minor, unsigned long req, void *ptr, ssize_t exp,
	result_t *res)
{
	/* Perform an I/O control request, using a local buffer.
	 */
	cp_grant_id_t grant;
	message m;
	int r, perm;

	assert(!_MINIX_IOCTL_BIG(req));	/* not supported */

	perm = 0;
	if (_MINIX_IOCTL_IOR(req)) perm |= CPF_WRITE;
	if (_MINIX_IOCTL_IOW(req)) perm |= CPF_READ;

	if ((grant = cpf_grant_direct(driver_endpt, (vir_bytes) ptr,
			_MINIX_IOCTL_SIZE(req), perm)) == GRANT_INVALID)
		panic("unable to allocate grant");

	memset(&m, 0, sizeof(m));
	m.m_type = BDEV_IOCTL;
	m.m_lbdev_lblockdriver_msg.minor = minor;
	m.m_lbdev_lblockdriver_msg.request = req;
	m.m_lbdev_lblockdriver_msg.grant = grant;
	m.m_lbdev_lblockdriver_msg.user = NONE;
	m.m_lbdev_lblockdriver_msg.id = lrand48();

	r = sendrec_driver(&m, exp, res);

	if (cpf_revoke(grant) == -1)
		panic("unable to revoke grant");

	return r;
}
static void misc_ioctl(void)
{
	/* Test some ioctls.
	 */
	result_t res;
	int openct;

	test_group("test miscellaneous ioctls", TRUE);

	/* Retrieve the main partition's base and size. Save for later. */
	vir_ioctl(driver_minor, DIOCGETP, &part, OK, &res);

	got_result(&res, "ioctl to get partition");

	/* The other tests do not check whether there is sufficient room. */
	if (res.type == RESULT_OK && part.size < (u64_t)max_size * 2)
		output("WARNING: small partition, some tests may fail\n");

	/* Test retrieving global driver open count. */
	openct = 0x0badcafe;

	vir_ioctl(driver_minor, DIOCOPENCT, &openct, OK, &res);

	/* We assume that we're the only client to the driver right now. */
	if (res.type == RESULT_OK && openct != 1) {
		res.type = RESULT_BADVALUE;
		res.value = openct;
	}

	got_result(&res, "ioctl to get open count");

	/* Test increasing and re-retrieving open count. */
	open_device(driver_minor);

	openct = 0x0badcafe;

	vir_ioctl(driver_minor, DIOCOPENCT, &openct, OK, &res);

	if (res.type == RESULT_OK && openct != 2) {
		res.type = RESULT_BADVALUE;
		res.value = openct;
	}

	got_result(&res, "increased open count after opening");

	/* Test decreasing and re-retrieving open count. */
	close_device(driver_minor);

	openct = 0x0badcafe;

	vir_ioctl(driver_minor, DIOCOPENCT, &openct, OK, &res);

	if (res.type == RESULT_OK && openct != 1) {
		res.type = RESULT_BADVALUE;
		res.value = openct;
	}

	got_result(&res, "decreased open count after closing");
}
static void read_limits(dev_t sub0_minor, dev_t sub1_minor, size_t sub_size)
{
	/* Test reads up to, across, and beyond partition limits.
	 */
	u8_t *buf_ptr;
	size_t buf_size;
	u32_t sum, sum2, sum3;
	result_t res;

	test_group("read around subpartition limits", TRUE);

	buf_size = sector_size * 3;
	buf_ptr = alloc_dma_memory(buf_size);

	/* Read one sector up to the partition limit. */
	fill_rand(buf_ptr, buf_size);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size, buf_ptr,
		sector_size, FALSE, sector_size, &res);

	sum = get_sum(buf_ptr, sector_size);

	got_result(&res, "one sector read up to partition end");

	/* Read three sectors up to the partition limit. */
	fill_rand(buf_ptr, buf_size);

	simple_xfer(sub0_minor, (u64_t)sub_size - buf_size, buf_ptr, buf_size,
		FALSE, buf_size, &res);

	test_sum(buf_ptr + sector_size * 2, sector_size, sum, TRUE, &res);

	sum2 = get_sum(buf_ptr + sector_size, sector_size * 2);

	got_result(&res, "multisector read up to partition end");

	/* Read three sectors, two up to and one beyond the partition end. */
	fill_rand(buf_ptr, buf_size);
	sum3 = get_sum(buf_ptr + sector_size * 2, sector_size);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size * 2, buf_ptr,
		buf_size, FALSE, sector_size * 2, &res);

	test_sum(buf_ptr, sector_size * 2, sum2, TRUE, &res);
	test_sum(buf_ptr + sector_size * 2, sector_size, sum3, TRUE, &res);

	got_result(&res, "read somewhat across partition end");

	/* Read three sectors, one up to and two beyond the partition end. */
	fill_rand(buf_ptr, buf_size);
	sum2 = get_sum(buf_ptr + sector_size, sector_size * 2);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size, buf_ptr,
		buf_size, FALSE, sector_size, &res);

	test_sum(buf_ptr, sector_size, sum, TRUE, &res);
	test_sum(buf_ptr + sector_size, sector_size * 2, sum2, TRUE, &res);

	got_result(&res, "read mostly across partition end");

	/* Read one sector starting at the partition end. */
	sum = fill_rand(buf_ptr, buf_size);
	sum2 = get_sum(buf_ptr, sector_size);

	simple_xfer(sub0_minor, (u64_t)sub_size, buf_ptr, sector_size, FALSE,
		0, &res);

	test_sum(buf_ptr, sector_size, sum2, TRUE, &res);

	got_result(&res, "one sector read at partition end");

	/* Read three sectors starting at the partition end. */
	simple_xfer(sub0_minor, (u64_t)sub_size, buf_ptr, buf_size, FALSE, 0,
		&res);

	test_sum(buf_ptr, buf_size, sum, TRUE, &res);

	got_result(&res, "multisector read at partition end");

	/* Read one sector beyond the partition end. */
	simple_xfer(sub0_minor, (u64_t)sub_size + sector_size, buf_ptr,
		buf_size, FALSE, 0, &res);

	test_sum(buf_ptr, sector_size, sum2, TRUE, &res);

	got_result(&res, "single sector read beyond partition end");

	/* Read three sectors way beyond the partition end. */
	simple_xfer(sub0_minor, 0x1000000000000000ULL, buf_ptr, buf_size,
		FALSE, 0, &res);

	test_sum(buf_ptr, buf_size, sum, TRUE, &res);

	got_result(&res, "read way beyond partition end");

	/* Test negative offsets. This request should return EOF or fail; we
	 * assume that it returns EOF here (because that is what the AHCI
	 * driver does, to avoid producing errors for requests close to the
	 * 2^64 byte position limit [yes, this will indeed never happen
	 * anyway]). This is more or less a bad requests test, but we cannot
	 * do it without setting up subpartitions first.
	 */
	simple_xfer(sub1_minor, 0xffffffffffffffffULL - sector_size + 1,
		buf_ptr, sector_size, FALSE, 0, &res);

	test_sum(buf_ptr, sector_size, sum2, TRUE, &res);

	got_result(&res, "read with negative offset");

	/* Clean up. */
	free_dma_memory(buf_ptr, buf_size);
}
static void write_limits(dev_t sub0_minor, dev_t sub1_minor, size_t sub_size)
{
	/* Test writes up to, across, and beyond partition limits. Use the
	 * first given subpartition to test, and the second to make sure there
	 * are no overruns. The given size is the size of each of the
	 * subpartitions. Note that the necessity to check the results using
	 * readback makes this more or less a superset of the read test.
	 */
	u8_t *buf_ptr;
	size_t buf_size;
	u32_t sum, sum2, sum3, sub1_sum;
	result_t res;

	test_group("write around subpartition limits", may_write);

	if (!may_write)
		return;

	buf_size = sector_size * 3;
	buf_ptr = alloc_dma_memory(buf_size);

	/* Write to the start of the second subpartition, so that we can
	 * reliably check whether the contents have changed later.
	 */
	sub1_sum = fill_rand(buf_ptr, buf_size);

	simple_xfer(sub1_minor, 0ULL, buf_ptr, buf_size, TRUE, buf_size, &res);

	got_result(&res, "write to second subpartition");

	/* Write one sector, up to the partition limit. */
	sum = fill_rand(buf_ptr, sector_size);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size, buf_ptr,
		sector_size, TRUE, sector_size, &res);

	got_result(&res, "write up to partition end");

	/* Read back to make sure the results have persisted. */
	fill_rand(buf_ptr, sector_size * 2);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size * 2, buf_ptr,
		sector_size * 2, FALSE, sector_size * 2, &res);

	test_sum(buf_ptr + sector_size, sector_size, sum, TRUE, &res);

	got_result(&res, "read up to partition end");

	/* Write three sectors, two up to and one beyond the partition end. */
	fill_rand(buf_ptr, buf_size);
	sum = get_sum(buf_ptr + sector_size, sector_size);
	sum3 = get_sum(buf_ptr, sector_size);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size * 2, buf_ptr,
		buf_size, TRUE, sector_size * 2, &res);

	got_result(&res, "write somewhat across partition end");

	/* Read three sectors, one up to and two beyond the partition end. */
	fill_rand(buf_ptr, buf_size);
	sum2 = get_sum(buf_ptr + sector_size, sector_size * 2);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size, buf_ptr,
		buf_size, FALSE, sector_size, &res);

	test_sum(buf_ptr, sector_size, sum, TRUE, &res);
	test_sum(buf_ptr + sector_size, sector_size * 2, sum2, TRUE, &res);

	got_result(&res, "read mostly across partition end");

	/* Repeat this but with write and read start positions swapped. */
	fill_rand(buf_ptr, buf_size);
	sum = get_sum(buf_ptr, sector_size);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size, buf_ptr,
		buf_size, TRUE, sector_size, &res);

	got_result(&res, "write mostly across partition end");

	fill_rand(buf_ptr, buf_size);
	sum2 = get_sum(buf_ptr + sector_size * 2, sector_size);

	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size * 2, buf_ptr,
		buf_size, FALSE, sector_size * 2, &res);

	test_sum(buf_ptr, sector_size, sum3, TRUE, &res);
	test_sum(buf_ptr + sector_size, sector_size, sum, TRUE, &res);
	test_sum(buf_ptr + sector_size * 2, sector_size, sum2, TRUE, &res);

	got_result(&res, "read somewhat across partition end");

	/* Write one sector at the end of the partition. */
	fill_rand(buf_ptr, sector_size);

	simple_xfer(sub0_minor, (u64_t)sub_size, buf_ptr, sector_size, TRUE, 0,
		&res);

	got_result(&res, "write at partition end");

	/* Write one sector beyond the end of the partition. */
	simple_xfer(sub0_minor, (u64_t)sub_size + sector_size, buf_ptr,
		sector_size, TRUE, 0, &res);

	got_result(&res, "write beyond partition end");

	/* Read from the start of the second subpartition, and see if it
	 * matches what we wrote into it earlier.
	 */
	fill_rand(buf_ptr, buf_size);

	simple_xfer(sub1_minor, 0ULL, buf_ptr, buf_size, FALSE, buf_size,
		&res);

	test_sum(buf_ptr, buf_size, sub1_sum, TRUE, &res);

	got_result(&res, "read from second subpartition");

	/* Test offset wrapping, but this time for writes. */
	fill_rand(buf_ptr, sector_size);

	simple_xfer(sub1_minor, 0xffffffffffffffffULL - sector_size + 1,
		buf_ptr, sector_size, TRUE, 0, &res);

	got_result(&res, "write with negative offset");

	/* If the last request erroneously succeeded, it would have overwritten
	 * the last sector of the first subpartition.
	 */
	simple_xfer(sub0_minor, (u64_t)sub_size - sector_size, buf_ptr,
		sector_size, FALSE, sector_size, &res);

	test_sum(buf_ptr, sector_size, sum, TRUE, &res);

	got_result(&res, "read up to partition end");

	/* Clean up. */
	free_dma_memory(buf_ptr, buf_size);
}
static void vir_limits(dev_t sub0_minor, dev_t sub1_minor, int part_secs)
{
	/* Create virtual, temporary subpartitions through the DIOCSETP ioctl,
	 * and perform tests on the resulting subpartitions.
	 */
	struct part_geom subpart, subpart2;
	size_t sub_size;
	result_t res;

	test_group("virtual subpartition limits", TRUE);

	/* Open the subpartitions. This is somewhat dodgy; we rely on the
	 * driver allowing this even if no subpartitions exist. We cannot do
	 * this test without doing a DIOCSETP on an open subdevice, though.
	 */
	open_device(sub0_minor);
	open_device(sub1_minor);

	sub_size = sector_size * part_secs;

	/* Set, and check, the size of the first subpartition. */
	subpart = part;
	subpart.size = (u64_t)sub_size;

	vir_ioctl(sub0_minor, DIOCSETP, &subpart, OK, &res);

	got_result(&res, "ioctl to set first subpartition");

	vir_ioctl(sub0_minor, DIOCGETP, &subpart2, OK, &res);

	if (res.type == RESULT_OK && (subpart.base != subpart2.base ||
			subpart.size != subpart2.size)) {
		res.type = RESULT_BADVALUE;
		res.value = 0;
	}

	got_result(&res, "ioctl to get first subpartition");

	/* Set, and check, the base and size of the second subpartition. */
	subpart = part;
	subpart.base += sub_size;
	subpart.size = (u64_t)sub_size;

	vir_ioctl(sub1_minor, DIOCSETP, &subpart, OK, &res);

	got_result(&res, "ioctl to set second subpartition");

	vir_ioctl(sub1_minor, DIOCGETP, &subpart2, OK, &res);

	if (res.type == RESULT_OK && (subpart.base != subpart2.base ||
			subpart.size != subpart2.size)) {
		res.type = RESULT_BADVALUE;
		res.value = 0;
	}

	got_result(&res, "ioctl to get second subpartition");

	/* Perform the actual I/O tests. */
	read_limits(sub0_minor, sub1_minor, sub_size);

	write_limits(sub0_minor, sub1_minor, sub_size);

	/* Clean up. */
	close_device(sub1_minor);
	close_device(sub0_minor);
}
static void real_limits(dev_t sub0_minor, dev_t sub1_minor, int part_secs)
{
	/* Create our own subpartitions by writing a partition table, and
	 * perform tests on the resulting real subpartitions.
	 */
	u8_t *buf_ptr;
	size_t buf_size, sub_size;
	struct part_geom subpart;
	struct part_entry *entry;
	result_t res;

	test_group("real subpartition limits", may_write);

	if (!may_write)
		return;

	sub_size = sector_size * part_secs;

	/* Technically, we should be using 512 instead of sector_size in
	 * various places, because even on CD-ROMs, the partition tables are
	 * 512 bytes and the sector counts are based on 512-byte sectors in it.
	 * We ignore this subtlety because CD-ROMs are assumed to be read-only
	 * anyway.
	 */
	buf_size = sector_size;
	buf_ptr = alloc_dma_memory(buf_size);

	memset(buf_ptr, 0, buf_size);

	/* Write an invalid partition table. */
	simple_xfer(driver_minor, 0ULL, buf_ptr, buf_size, TRUE, buf_size,
		&res);

	got_result(&res, "write of invalid partition table");

	/* Get the disk driver to reread the partition table. This should
	 * happen (at least) when the device is fully closed and then reopened.
	 * The ioctl test already made sure that we're the only client.
	 */
	close_device(driver_minor);
	open_device(driver_minor);

	/* See if our changes are visible. We expect the subpartitions to have
	 * a size of zero now, indicating that they're not there. For actual
	 * subpartitions (as opposed to normal partitions), this requires the
	 * driver to zero them out, because the partition code does not do so.
	 */
	open_device(sub0_minor);
	open_device(sub1_minor);

	vir_ioctl(sub0_minor, DIOCGETP, &subpart, 0, &res);

	if (res.type == RESULT_OK && subpart.size != 0) {
		res.type = RESULT_BADVALUE;
		res.value = ex64lo(subpart.size);
	}

	got_result(&res, "ioctl to get first subpartition");

	vir_ioctl(sub1_minor, DIOCGETP, &subpart, 0, &res);

	if (res.type == RESULT_OK && subpart.size != 0) {
		res.type = RESULT_BADVALUE;
		res.value = ex64lo(subpart.size);
	}

	got_result(&res, "ioctl to get second subpartition");

	close_device(sub1_minor);
	close_device(sub0_minor);

	/* Now write a valid partition table. */
	memset(buf_ptr, 0, buf_size);

	entry = (struct part_entry *) &buf_ptr[PART_TABLE_OFF];

	entry[0].sysind = MINIX_PART;
	entry[0].lowsec = part.base / sector_size + 1;
	entry[0].size = part_secs;
	entry[1].sysind = MINIX_PART;
	entry[1].lowsec = entry[0].lowsec + entry[0].size;
	entry[1].size = part_secs;

	buf_ptr[510] = 0x55;
	buf_ptr[511] = 0xAA;
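	/* The 0x55AA signature at offsets 510-511 is the standard MBR boot
	 * signature; without it, the partition code ignores the table, which
	 * is why the all-zeroes sector written above counted as an invalid
	 * partition table.
	 */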
	simple_xfer(driver_minor, 0ULL, buf_ptr, buf_size, TRUE, buf_size,
		&res);

	got_result(&res, "write of valid partition table");

	/* Same as above. */
	close_device(driver_minor);
	open_device(driver_minor);

	/* Again, see if our changes are visible. This time the proper base and
	 * size should be there.
	 */
	open_device(sub0_minor);
	open_device(sub1_minor);

	vir_ioctl(sub0_minor, DIOCGETP, &subpart, 0, &res);

	if (res.type == RESULT_OK &&
			(subpart.base != part.base + sector_size ||
			subpart.size != (u64_t)part_secs * sector_size)) {
		res.type = RESULT_BADVALUE;
		res.value = 0;
	}

	got_result(&res, "ioctl to get first subpartition");

	vir_ioctl(sub1_minor, DIOCGETP, &subpart, 0, &res);

	if (res.type == RESULT_OK &&
			(subpart.base != part.base + (1 + part_secs) *
			sector_size ||
			subpart.size != (u64_t)part_secs * sector_size)) {
		res.type = RESULT_BADVALUE;
		res.value = 0;
	}

	got_result(&res, "ioctl to get second subpartition");

	/* Now perform the actual I/O tests. */
	read_limits(sub0_minor, sub1_minor, sub_size);

	write_limits(sub0_minor, sub1_minor, sub_size);

	/* Clean up. */
	close_device(sub0_minor);
	close_device(sub1_minor);

	free_dma_memory(buf_ptr, buf_size);
}
static void part_limits(void)
{
	/* Test reads and writes up to, across, and beyond partition limits.
	 * As a side effect, test reading and writing partition sizes and
	 * rereading partition tables.
	 */
	dev_t par, sub0_minor, sub1_minor;

	/* First determine the first two subpartitions of the partition that we
	 * are operating on. If we are already operating on a subpartition, we
	 * cannot conduct this test.
	 */
	if (driver_minor >= MINOR_d0p0s0) {
		output("WARNING: operating on subpartition, "
			"skipping partition tests\n");
		return;
	}

	par = driver_minor % DEV_PER_DRIVE;
	if (par > 0)	/* adapted from libdriver's drvlib code */
		sub0_minor = MINOR_d0p0s0 + ((driver_minor / DEV_PER_DRIVE) *
			NR_PARTITIONS + par - 1) * NR_PARTITIONS;
	else
		sub0_minor = driver_minor + 1;
	sub1_minor = sub0_minor + 1;
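	/* Worked example, using the standard minor layout (assumed values:
	 * DEV_PER_DRIVE = 5, NR_PARTITIONS = 4, MINOR_d0p0s0 = 128): for
	 * driver_minor 2, i.e. d0p1, this yields sub0_minor 132 and
	 * sub1_minor 133, i.e. d0p1s0 and d0p1s1.
	 */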
#define PART_SECS	9	/* sectors in each partition. must be >= 4. */

	/* First try the test with temporarily specified subpartitions. */
	vir_limits(sub0_minor, sub1_minor, PART_SECS);

	/* Then, if we're allowed to write, try the test with real, persisted
	 * subpartitions.
	 */
	real_limits(sub0_minor, sub1_minor, PART_SECS - 1);
}
static void unaligned_size_io(u64_t base_pos, u8_t *buf_ptr, size_t buf_size,
	u8_t *sec_ptr[2], int sectors, int pattern, u32_t ssum[5])
{
	/* Perform a single small-element I/O read, write, readback test.
	 * The number of sectors and the pattern varies with each call.
	 * The ssum array has to be updated to reflect the five sectors'
	 * checksums on disk, if writing is enabled. Note that for
	 * some sector and pattern combinations, the test is skipped (see the
	 * check below).
	 */
	iovec_t iov[3], iovt[3];
	u32_t rsum[3];
	size_t total_size;
	result_t res;
	int i, nr_req;

	base_pos += sector_size;
	total_size = sector_size * sectors;

	/* If the limit is two elements per sector, we cannot test three
	 * elements in a single sector.
	 */
	if (sector_size / element_size == 2 && sectors == 1 && pattern == 2)
		return;

	/* Set up the buffers and I/O vector. We use different buffers for the
	 * elements to minimize the chance that something "accidentally" goes
	 * right, but that means we have to do memory copying to do checksum
	 * computation.
	 */
	fill_rand(sec_ptr[0], sector_size);
	rsum[0] =
		get_sum(sec_ptr[0] + element_size, sector_size - element_size);

	fill_rand(buf_ptr, buf_size);

	switch (pattern) {
	case 0:
		/* First pattern: a small element on the left. */
		iovt[0].iov_addr = (vir_bytes) sec_ptr[0];
		iovt[0].iov_size = element_size;

		iovt[1].iov_addr = (vir_bytes) buf_ptr;
		iovt[1].iov_size = total_size - element_size;
		rsum[1] = get_sum(buf_ptr + iovt[1].iov_size, element_size);

		nr_req = 2;
		break;
	case 1:
		/* Second pattern: a small element on the right. */
		iovt[0].iov_addr = (vir_bytes) buf_ptr;
		iovt[0].iov_size = total_size - element_size;
		rsum[1] = get_sum(buf_ptr + iovt[0].iov_size, element_size);

		iovt[1].iov_addr = (vir_bytes) sec_ptr[0];
		iovt[1].iov_size = element_size;

		nr_req = 2;
		break;
	case 2:
		/* Third pattern: a small element on each side. */
		iovt[0].iov_addr = (vir_bytes) sec_ptr[0];
		iovt[0].iov_size = element_size;

		iovt[1].iov_addr = (vir_bytes) buf_ptr;
		iovt[1].iov_size = total_size - element_size * 2;
		rsum[1] = get_sum(buf_ptr + iovt[1].iov_size,
			element_size * 2);

		fill_rand(sec_ptr[1], sector_size);
		iovt[2].iov_addr = (vir_bytes) sec_ptr[1];
		iovt[2].iov_size = element_size;
		rsum[2] = get_sum(sec_ptr[1] + element_size,
			sector_size - element_size);

		nr_req = 3;
		break;
	}

	/* Perform a read with small elements, and test whether the result is
	 * as expected.
	 */
	memcpy(iov, iovt, sizeof(iov));
	vir_xfer(driver_minor, base_pos, iov, nr_req, FALSE, total_size, &res);

	test_sum(sec_ptr[0] + element_size, sector_size - element_size,
		rsum[0], TRUE, &res);

	switch (pattern) {
	case 0:
		test_sum(buf_ptr + iovt[1].iov_size, element_size, rsum[1],
			TRUE, &res);
		memmove(buf_ptr + element_size, buf_ptr, iovt[1].iov_size);
		memcpy(buf_ptr, sec_ptr[0], element_size);
		break;
	case 1:
		test_sum(buf_ptr + iovt[0].iov_size, element_size, rsum[1],
			TRUE, &res);
		memcpy(buf_ptr + iovt[0].iov_size, sec_ptr[0], element_size);
		break;
	case 2:
		test_sum(buf_ptr + iovt[1].iov_size, element_size * 2, rsum[1],
			TRUE, &res);
		test_sum(sec_ptr[1] + element_size, sector_size - element_size,
			rsum[2], TRUE, &res);
		memmove(buf_ptr + element_size, buf_ptr, iovt[1].iov_size);
		memcpy(buf_ptr, sec_ptr[0], element_size);
		memcpy(buf_ptr + element_size + iovt[1].iov_size, sec_ptr[1],
			element_size);
		break;
	}

	for (i = 0; i < sectors; i++)
		test_sum(buf_ptr + sector_size * i, sector_size, ssum[1 + i],
			TRUE, &res);

	got_result(&res, "read with small elements");

	/* In read-only mode, we have nothing more to do. */
	if (!may_write)
		return;

	/* Use the same I/O vector to perform a write with small elements.
	 * This will cause the checksums of the target sectors to change,
	 * so we need to update those for both verification and later usage.
	 */
	for (i = 0; i < sectors; i++)
		ssum[1 + i] =
			fill_rand(buf_ptr + sector_size * i, sector_size);

	switch (pattern) {
	case 0:
		memcpy(sec_ptr[0], buf_ptr, element_size);
		memmove(buf_ptr, buf_ptr + element_size, iovt[1].iov_size);
		fill_rand(buf_ptr + iovt[1].iov_size, element_size);
		break;
	case 1:
		memcpy(sec_ptr[0], buf_ptr + iovt[0].iov_size, element_size);
		fill_rand(buf_ptr + iovt[0].iov_size, element_size);
		break;
	case 2:
		memcpy(sec_ptr[0], buf_ptr, element_size);
		memcpy(sec_ptr[1], buf_ptr + element_size + iovt[1].iov_size,
			element_size);
		memmove(buf_ptr, buf_ptr + element_size, iovt[1].iov_size);
		fill_rand(buf_ptr + iovt[1].iov_size, element_size * 2);
		break;
	}

	memcpy(iov, iovt, sizeof(iov));

	vir_xfer(driver_minor, base_pos, iov, nr_req, TRUE, total_size, &res);

	got_result(&res, "write with small elements");

	/* Now perform normal readback verification. */
	fill_rand(buf_ptr, sector_size * 3);

	simple_xfer(driver_minor, base_pos, buf_ptr, sector_size * 3, FALSE,
		sector_size * 3, &res);

	for (i = 0; i < 3; i++)
		test_sum(buf_ptr + sector_size * i, sector_size, ssum[1 + i],
			TRUE, &res);

	got_result(&res, "readback verification");
}
static void unaligned_size(void)
{
	/* Test sector-unaligned sizes in I/O vector elements. The total size
	 * of each request, however, still has to add up to a multiple of the
	 * sector size.
	 */
	u8_t *buf_ptr, *sec_ptr[2];
	size_t buf_size;
	u32_t sum = 0L, ssum[5];
	u64_t base_pos;
	result_t res;
	int i;

	test_group("sector-unaligned elements", sector_size != element_size);

	/* We can only do this test if the driver allows small elements. */
	if (sector_size == element_size)
		return;

	/* Crashing on bad user input, terrible! */
	assert(sector_size % element_size == 0);

	/* Establish a baseline by writing and reading back five sectors; or
	 * by reading only, if writing is disabled.
	 */
	buf_size = sector_size * 5;

	base_pos = (u64_t)sector_size * 2;

	buf_ptr = alloc_dma_memory(buf_size);
	sec_ptr[0] = alloc_dma_memory(sector_size);
	sec_ptr[1] = alloc_dma_memory(sector_size);

	if (may_write) {
		sum = fill_rand(buf_ptr, buf_size);

		for (i = 0; i < 5; i++)
			ssum[i] = get_sum(buf_ptr + sector_size * i,
				sector_size);

		simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, TRUE,
			buf_size, &res);

		got_result(&res, "write several sectors");
	}

	fill_rand(buf_ptr, buf_size);

	simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, FALSE, buf_size,
		&res);

	if (may_write) {
		test_sum(buf_ptr, buf_size, sum, TRUE, &res);
	} else {
		for (i = 0; i < 5; i++)
			ssum[i] = get_sum(buf_ptr + sector_size * i,
				sector_size);
	}

	got_result(&res, "read several sectors");
	/* We do nine subtests. The first three involve only the second
	 * sector; the second three involve the second and third sectors; and
	 * the last three involve the three middle sectors. Each triplet tests
	 * small elements at the left, at the right, and at both the left and
	 * the right of the area. For each operation, we first do an unaligned
	 * read, and, if writing is enabled, an unaligned write followed by an
	 * aligned read. The mapping from the loop counter to these parameters
	 * is illustrated below.
	 */
	for (i = 0; i < 9; i++) {
		unaligned_size_io(base_pos, buf_ptr, buf_size, sec_ptr,
			i / 3 + 1, i % 3, ssum);
	}
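	/* A sketch of the subtest mapping implied by the two computed
	 * arguments above (this follows directly from i / 3 + 1 and i % 3):
	 *
	 *   i = 0..2: 1 sector  (second only),       patterns 0, 1, 2
	 *   i = 3..5: 2 sectors (second and third),  patterns 0, 1, 2
	 *   i = 6..8: 3 sectors (second to fourth),  patterns 0, 1, 2
	 *
	 * where pattern 0 places the small element at the left of the area,
	 * pattern 1 at the right, and pattern 2 at both sides.
	 */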
	if (may_write) {
		/* If writing was enabled, make sure that the first and fifth
		 * sectors have remained untouched.
		 */
		fill_rand(buf_ptr, buf_size);

		simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, FALSE,
			buf_size, &res);

		test_sum(buf_ptr, sector_size, ssum[0], TRUE, &res);
		test_sum(buf_ptr + sector_size * 4, sector_size, ssum[4], TRUE,
			&res);

		got_result(&res, "check first and last sectors");
	}

	/* Clean up. */
	free_dma_memory(sec_ptr[1], sector_size);
	free_dma_memory(sec_ptr[0], sector_size);
	free_dma_memory(buf_ptr, buf_size);
}
static void unaligned_pos1(void)
{
	/* Test sector-unaligned positions and total sizes for requests. This
	 * is a read-only test for now; write support should be added later.
	 * In the current context, the term "lead" means an unwanted first
	 * part of a sector, and "trail" means an unwanted last part of a
	 * sector.
	 */
	u8_t *buf_ptr, *buf2_ptr;
	size_t buf_size, buf2_size, size;
	u32_t sum = 0L, sum2 = 0L;
	u64_t base_pos;
	result_t res;

	test_group("sector-unaligned positions, part one",
		min_read != sector_size);

	/* We can only do this test if the driver allows small read requests.
	 */
	if (min_read == sector_size)
		return;

	assert(sector_size % min_read == 0);
	assert(min_read % element_size == 0);

	/* Establish a baseline by writing and reading back three sectors; or
	 * by reading only, if writing is disabled.
	 */
	buf_size = buf2_size = sector_size * 3;

	base_pos = (u64_t)sector_size * 3;

	buf_ptr = alloc_dma_memory(buf_size);
	buf2_ptr = alloc_dma_memory(buf2_size);

	if (may_write) {
		sum = fill_rand(buf_ptr, buf_size);

		simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, TRUE,
			buf_size, &res);

		got_result(&res, "write several sectors");
	}

	fill_rand(buf_ptr, buf_size);

	simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, FALSE, buf_size,
		&res);

	if (may_write)
		test_sum(buf_ptr, buf_size, sum, TRUE, &res);

	got_result(&res, "read several sectors");
	/* Start with a simple test that operates within a single sector,
	 * first using a lead.
	 */
	fill_rand(buf2_ptr, sector_size);
	sum = get_sum(buf2_ptr + min_read, sector_size - min_read);

	simple_xfer(driver_minor, base_pos + sector_size - min_read,
		buf2_ptr, min_read, FALSE, min_read, &res);

	test_sum(buf2_ptr, min_read, get_sum(buf_ptr + sector_size - min_read,
		min_read), TRUE, &res);
	test_sum(buf2_ptr + min_read, sector_size - min_read, sum, TRUE,
		&res);

	got_result(&res, "single sector read with lead");
	/* Then a trail. */
	fill_rand(buf2_ptr, sector_size);
	sum = get_sum(buf2_ptr, sector_size - min_read);

	simple_xfer(driver_minor, base_pos, buf2_ptr + sector_size - min_read,
		min_read, FALSE, min_read, &res);

	test_sum(buf2_ptr + sector_size - min_read, min_read, get_sum(buf_ptr,
		min_read), TRUE, &res);
	test_sum(buf2_ptr, sector_size - min_read, sum, TRUE, &res);

	got_result(&res, "single sector read with trail");
	/* And then a lead and a trail, unless min_read is half the sector
	 * size, in which case this will be another lead test.
	 */
	fill_rand(buf2_ptr, sector_size);
	sum = get_sum(buf2_ptr, min_read);
	sum2 = get_sum(buf2_ptr + min_read * 2, sector_size - min_read * 2);

	simple_xfer(driver_minor, base_pos + min_read, buf2_ptr + min_read,
		min_read, FALSE, min_read, &res);

	test_sum(buf2_ptr + min_read, min_read, get_sum(buf_ptr + min_read,
		min_read), TRUE, &res);
	test_sum(buf2_ptr, min_read, sum, TRUE, &res);
	test_sum(buf2_ptr + min_read * 2, sector_size - min_read * 2, sum2,
		TRUE, &res);

	got_result(&res, "single sector read with lead and trail");
	/* Now do the same, but with three sectors and still only one I/O
	 * vector element. First up: lead.
	 */
	size = min_read + sector_size * 2;

	fill_rand(buf2_ptr, buf2_size);
	sum = get_sum(buf2_ptr + size, buf2_size - size);

	simple_xfer(driver_minor, base_pos + sector_size - min_read, buf2_ptr,
		size, FALSE, size, &res);

	test_sum(buf2_ptr, size, get_sum(buf_ptr + sector_size - min_read,
		size), TRUE, &res);
	test_sum(buf2_ptr + size, buf2_size - size, sum, TRUE, &res);

	got_result(&res, "multisector read with lead");

	/* Then a trail. */
	fill_rand(buf2_ptr, buf2_size);
	sum = get_sum(buf2_ptr + size, buf2_size - size);

	simple_xfer(driver_minor, base_pos, buf2_ptr, size, FALSE, size, &res);

	test_sum(buf2_ptr, size, get_sum(buf_ptr, size), TRUE, &res);
	test_sum(buf2_ptr + size, buf2_size - size, sum, TRUE, &res);

	got_result(&res, "multisector read with trail");

	/* Then a lead and a trail. Use the sector size as the transfer unit
	 * here, to throw off simplistic lead/trail detection.
	 */
	fill_rand(buf2_ptr, buf2_size);
	sum = get_sum(buf2_ptr + sector_size, buf2_size - sector_size);

	simple_xfer(driver_minor, base_pos + min_read, buf2_ptr, sector_size,
		FALSE, sector_size, &res);

	test_sum(buf2_ptr, sector_size, get_sum(buf_ptr + min_read,
		sector_size), TRUE, &res);
	test_sum(buf2_ptr + sector_size, buf2_size - sector_size, sum, TRUE,
		&res);

	got_result(&res, "multisector read with lead and trail");
	/* Clean up. */
	free_dma_memory(buf2_ptr, buf2_size);
	free_dma_memory(buf_ptr, buf_size);
}
static void unaligned_pos2(void)
{
	/* Test sector-unaligned positions and total sizes for requests,
	 * second part. This one tests the use of multiple I/O vector
	 * elements, and tries to push the limits of the driver by completely
	 * filling an I/O vector and by going up to the maximum request size.
	 */
	u8_t *buf_ptr, *buf2_ptr;
	size_t buf_size, buf2_size, max_block;
	u32_t sum = 0L, sum2 = 0L, rsum[NR_IOREQS];
	u64_t base_pos;
	iovec_t iov[NR_IOREQS];
	result_t res;
	int i;

	test_group("sector-unaligned positions, part two",
		min_read != sector_size);

	/* We can only do this test if the driver allows small read requests.
	 */
	if (min_read == sector_size)
		return;

	buf_size = buf2_size = max_size + sector_size;

	base_pos = (u64_t)sector_size * 3;

	buf_ptr = alloc_dma_memory(buf_size);
	buf2_ptr = alloc_dma_memory(buf2_size);
	/* First establish a baseline. We need two requests for this, as the
	 * total area intentionally exceeds the maximum request size.
	 */
	if (may_write) {
		sum = fill_rand(buf_ptr, max_size);

		simple_xfer(driver_minor, base_pos, buf_ptr, max_size, TRUE,
			max_size, &res);

		got_result(&res, "large baseline write");

		sum2 = fill_rand(buf_ptr + max_size, sector_size);

		simple_xfer(driver_minor, base_pos + max_size,
			buf_ptr + max_size, sector_size, TRUE, sector_size,
			&res);

		got_result(&res, "small baseline write");
	}

	fill_rand(buf_ptr, buf_size);

	simple_xfer(driver_minor, base_pos, buf_ptr, max_size, FALSE, max_size,
		&res);

	if (may_write)
		test_sum(buf_ptr, max_size, sum, TRUE, &res);

	got_result(&res, "large baseline read");

	simple_xfer(driver_minor, base_pos + max_size, buf_ptr + max_size,
		sector_size, FALSE, sector_size, &res);

	if (may_write)
		test_sum(buf_ptr + max_size, sector_size, sum2, TRUE, &res);

	got_result(&res, "small baseline read");
	/* First construct a full vector with minimal sizes. The resulting
	 * area may well fall within a single sector, if min_read is small
	 * enough.
	 */
	fill_rand(buf2_ptr, buf2_size);

	for (i = 0; i < NR_IOREQS; i++) {
		iov[i].iov_addr = (vir_bytes) buf2_ptr + i * sector_size;
		iov[i].iov_size = min_read;

		rsum[i] = get_sum(buf2_ptr + i * sector_size + min_read,
			sector_size - min_read);
	}

	vir_xfer(driver_minor, base_pos + min_read, iov, NR_IOREQS, FALSE,
		min_read * NR_IOREQS, &res);

	for (i = 0; i < NR_IOREQS; i++) {
		test_sum(buf2_ptr + i * sector_size + min_read,
			sector_size - min_read, rsum[i], TRUE, &res);
		memmove(buf2_ptr + i * min_read, buf2_ptr + i * sector_size,
			min_read);
	}

	test_sum(buf2_ptr, min_read * NR_IOREQS, get_sum(buf_ptr + min_read,
		min_read * NR_IOREQS), TRUE, &res);

	got_result(&res, "small fully unaligned filled vector");
	/* Sneak in a maximum sized request with a single I/O vector element,
	 * unaligned. If the driver splits up such large requests into smaller
	 * chunks, this tests whether it does so correctly in the presence of
	 * misalignment.
	 */
	fill_rand(buf2_ptr, buf2_size);

	simple_xfer(driver_minor, base_pos + min_read, buf2_ptr, max_size,
		FALSE, max_size, &res);

	test_sum(buf2_ptr, max_size, get_sum(buf_ptr + min_read, max_size),
		TRUE, &res);

	got_result(&res, "large fully unaligned single element");
	/* Then try a vector where each element is as large as possible. We
	 * do not have room to do bounds integrity checking here (we could
	 * make room, but this may be a lot of memory already).
	 */
	/* Compute the largest sector multiple which, when multiplied by
	 * NR_IOREQS, is no more than the maximum transfer size.
	 */
	max_block = max_size / NR_IOREQS;
	max_block -= max_block % sector_size;
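	/* For example (assuming the default max_size of 131072, NR_IOREQS =
	 * 64, and 512-byte sectors): 131072 / 64 = 2048, which is already a
	 * sector multiple, so max_block = 2048 and the full vector transfers
	 * 64 * 2048 = 131072 bytes. The subtraction only has an effect when
	 * the division yields a value that is not a sector multiple.
	 */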
	fill_rand(buf2_ptr, buf2_size);

	for (i = 0; i < NR_IOREQS; i++) {
		iov[i].iov_addr = (vir_bytes) buf2_ptr + i * max_block;
		iov[i].iov_size = max_block;
	}

	vir_xfer(driver_minor, base_pos + min_read, iov, NR_IOREQS, FALSE,
		max_block * NR_IOREQS, &res);

	test_sum(buf2_ptr, max_block * NR_IOREQS, get_sum(buf_ptr + min_read,
		max_block * NR_IOREQS), TRUE, &res);

	got_result(&res, "large fully unaligned filled vector");

	/* Clean up. */
	free_dma_memory(buf2_ptr, buf2_size);
	free_dma_memory(buf_ptr, buf_size);
}
static void sweep_area(u64_t base_pos)
{
	/* Go over an eight-sector area from left (low address) to right
	 * (high address), reading and optionally writing in three-sector
	 * chunks, and advancing one sector at a time.
	 */
	u8_t *buf_ptr;
	size_t buf_size;
	u32_t sum = 0L, ssum[8];
	result_t res;
	int i, j;

	buf_size = sector_size * 8;
	buf_ptr = alloc_dma_memory(buf_size);

	/* First (write to, if allowed, and) read from the entire area in one
	 * go, so that we know the (initial) contents of the area.
	 */
	if (may_write) {
		sum = fill_rand(buf_ptr, buf_size);

		simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, TRUE,
			buf_size, &res);

		got_result(&res, "write to full area");
	}

	fill_rand(buf_ptr, buf_size);

	simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, FALSE, buf_size,
		&res);

	if (may_write)
		test_sum(buf_ptr, buf_size, sum, TRUE, &res);

	for (i = 0; i < 8; i++)
		ssum[i] = get_sum(buf_ptr + sector_size * i, sector_size);

	got_result(&res, "read from full area");
	/* For each of the six three-sector subareas, first read from the
	 * subarea and check its checksums, and then (if allowed) write new
	 * content to it.
	 */
	for (i = 0; i < 6; i++) {
		fill_rand(buf_ptr, sector_size * 3);

		simple_xfer(driver_minor, base_pos + sector_size * i, buf_ptr,
			sector_size * 3, FALSE, sector_size * 3, &res);

		for (j = 0; j < 3; j++)
			test_sum(buf_ptr + sector_size * j, sector_size,
				ssum[i + j], TRUE, &res);

		got_result(&res, "read from subarea");

		if (!may_write)
			continue;

		fill_rand(buf_ptr, sector_size * 3);

		simple_xfer(driver_minor, base_pos + sector_size * i, buf_ptr,
			sector_size * 3, TRUE, sector_size * 3, &res);

		for (j = 0; j < 3; j++)
			ssum[i + j] = get_sum(buf_ptr + sector_size * j,
				sector_size);

		got_result(&res, "write to subarea");
	}
	/* Finally, if writing was enabled, do one final readback. */
	if (may_write) {
		fill_rand(buf_ptr, buf_size);

		simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, FALSE,
			buf_size, &res);

		for (i = 0; i < 8; i++)
			test_sum(buf_ptr + sector_size * i, sector_size,
				ssum[i], TRUE, &res);

		got_result(&res, "readback from full area");
	}

	/* Clean up. */
	free_dma_memory(buf_ptr, buf_size);
}
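/* A sketch of the sweep performed by sweep_area() above, for an area of
 * eight sectors numbered 0 through 7 (derived from the loop bounds in the
 * code):
 *
 *   i = 0: [0 1 2]   i = 1: [1 2 3]   i = 2: [2 3 4]
 *   i = 3: [3 4 5]   i = 4: [4 5 6]   i = 5: [5 6 7]
 *
 * Each window is read (and, if allowed, rewritten) before the window
 * advances by one sector, so every sector is covered by up to three windows.
 */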
static void sweep_and_check(u64_t pos, int check_integ)
{
	/* Perform an area sweep at the given position. If asked for, get an
	 * integrity checksum over the beginning of the disk (first writing
	 * known data into it if that is allowed) before doing the sweep, and
	 * test the integrity checksum against the disk contents afterwards.
	 */
	u8_t *buf_ptr;
	size_t buf_size;
	u32_t sum = 0L;
	result_t res;

	if (check_integ) {
		buf_size = sector_size * 3;
		buf_ptr = alloc_dma_memory(buf_size);

		if (may_write) {
			sum = fill_rand(buf_ptr, buf_size);

			simple_xfer(driver_minor, 0ULL, buf_ptr, buf_size,
				TRUE, buf_size, &res);

			got_result(&res, "write integrity zone");
		}

		fill_rand(buf_ptr, buf_size);

		simple_xfer(driver_minor, 0ULL, buf_ptr, buf_size, FALSE,
			buf_size, &res);

		if (may_write)
			test_sum(buf_ptr, buf_size, sum, TRUE, &res);
		else
			sum = get_sum(buf_ptr, buf_size);

		got_result(&res, "read integrity zone");
	}

	sweep_area(pos);

	if (check_integ) {
		fill_rand(buf_ptr, buf_size);

		simple_xfer(driver_minor, 0ULL, buf_ptr, buf_size, FALSE,
			buf_size, &res);

		test_sum(buf_ptr, buf_size, sum, TRUE, &res);

		got_result(&res, "check integrity zone");

		free_dma_memory(buf_ptr, buf_size);
	}
}
static void basic_sweep(void)
{
	/* Perform a basic area sweep.
	 */
	test_group("basic area sweep", TRUE);

	sweep_area((u64_t)sector_size);
}
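/* Note that the sweep above covers bytes sector_size through sector_size * 9,
 * that is, sectors 1 through 8 of the partition, so the very first sector of
 * the partition is left untouched by this particular test.
 */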
static void high_disk_pos(void)
{
	/* Test 64-bit absolute disk positions. This means that after adding
	 * the partition base to the given position, the driver will be
	 * dealing with a position above 32 bit. We want to test the
	 * transition area only; if the entire partition base is above 32 bit,
	 * we have already effectively performed this test many times over. In
	 * other words, for this test, the partition must start below 4GB and
	 * end above 4GB, with at least four sectors on each side of the
	 * boundary.
	 */
	u64_t base_pos;

	base_pos = 0x100000000ULL | (sector_size * 4);
	base_pos -= base_pos % sector_size;

	/* The partition end must exceed 32 bits. */
	if (part.base + part.size < base_pos) {
		test_group("high disk positions", FALSE);

		return;
	}

	base_pos -= sector_size * 8;

	/* The partition start must not. */
	if (base_pos < part.base) {
		test_group("high disk positions", FALSE);

		return;
	}

	test_group("high disk positions", TRUE);

	base_pos -= part.base;

	sweep_and_check(base_pos, part.base == 0ULL);
}
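/* To make the position arithmetic above concrete (assuming 512-byte sectors,
 * for illustration): the initial base_pos is 0x100000000 + 2048, i.e. four
 * sectors past the 4GB mark, and is already sector-aligned, so the modulo
 * correction does nothing. After subtracting eight sectors, base_pos is
 * 4GB - 2048, so the eight-sector sweep spans four sectors below and four
 * sectors above the 32-bit boundary.
 */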
static void high_part_pos(void)
{
	/* Test 64-bit partition-relative disk positions. In other words, use
	 * a position within the current partition that exceeds a 32-bit
	 * value. This requires the partition to be more than 4GB in size; to
	 * be exact, we need an additional four sectors beyond that.
	 */
	u64_t base_pos;

	/* If the partition starts at the beginning of the disk, this test is
	 * no different from the high disk position test.
	 */
	if (part.base == 0ULL) {
		/* don't complain: the test is simply superfluous now */
		test_group("high partition positions", TRUE);

		return;
	}

	base_pos = 0x100000000ULL | (sector_size * 4);
	base_pos -= base_pos % sector_size;

	if (part.size < base_pos) {
		test_group("high partition positions", FALSE);

		return;
	}

	test_group("high partition positions", TRUE);

	base_pos -= sector_size * 8;

	sweep_and_check(base_pos, TRUE);
}
static void high_lba_pos1(void)
{
	/* Test 48-bit LBA positions, as opposed to *24-bit*. Drivers that
	 * support only 48-bit LBA ATA transfers will treat the lower and
	 * upper 24 bits differently. This is again relative to the disk
	 * start, not the partition start. For 512-byte sectors, the lowest
	 * position exceeding 24 bit is at 8GB. We sweep the eight sectors
	 * leading up to that point, so the partition that we're operating on
	 * must fully cover them.
	 */
	u64_t base_pos;

	base_pos = (1ULL << 24) * sector_size;

	/* The partition end must exceed the 24-bit sector point. */
	if (part.base + part.size < base_pos) {
		test_group("high LBA positions, part one", FALSE);

		return;
	}

	base_pos -= sector_size * 8;

	/* The partition start must not. */
	if (base_pos < part.base) {
		test_group("high LBA positions, part one", FALSE);

		return;
	}

	test_group("high LBA positions, part one", TRUE);

	base_pos -= part.base;

	sweep_and_check(base_pos, part.base == 0ULL);
}
static void high_lba_pos2(void)
{
	/* Test 48-bit LBA positions, as opposed to *28-bit*. That means
	 * sector numbers in excess of 28-bit values; the old ATA upper limit.
	 * The same considerations as above apply, except that we now need a
	 * partition that reaches the 128GB point.
	 */
	u64_t base_pos;

	base_pos = (1ULL << 28) * sector_size;

	/* The partition end must exceed the 28-bit sector point. */
	if (part.base + part.size < base_pos) {
		test_group("high LBA positions, part two", FALSE);

		return;
	}

	base_pos -= sector_size * 8;

	/* The partition start must not. */
	if (base_pos < part.base) {
		test_group("high LBA positions, part two", FALSE);

		return;
	}

	test_group("high LBA positions, part two", TRUE);

	base_pos -= part.base;

	sweep_and_check(base_pos, part.base == 0ULL);
}
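/* The thresholds used by the two LBA tests above follow directly from the
 * sector size; for 512-byte sectors: (1ULL << 24) * 512 = 2^33 bytes = 8GB,
 * and (1ULL << 28) * 512 = 2^37 bytes = 128GB, matching the figures quoted
 * in the comments.
 */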
static void high_pos(void)
{
	/* Check whether the driver deals well with 64-bit positions and
	 * 48-bit LBA addresses. We test three cases: a disk byte position
	 * beyond what fits in 32 bits, an in-partition byte position beyond
	 * what fits in 32 bits, and a disk sector position beyond what fits
	 * in 24 bits (with a second variant for 28 bits). With the partition
	 * we have been given, we may not be able to test all of them (or
	 * any, for that matter).
	 */
	/* In certain rare cases, we might be able to perform integrity
	 * checking on the area that would be affected if a 32-bit or 24-bit
	 * counter were to wrap. More specifically, we can do that if we can
	 * access the start of the disk. This is why we should be given the
	 * entire disk as the test area if at all possible.
	 */
	high_disk_pos();

	high_part_pos();

	high_lba_pos1();

	high_lba_pos2();
}
static void open_primary(void)
{
	/* Open the primary device. This call has its own test group.
	 */
	test_group("device open", TRUE);

	open_device(driver_minor);
}

static void close_primary(void)
{
	/* Close the primary device. This call has its own test group.
	 */
	test_group("device close", TRUE);

	close_device(driver_minor);

	assert(nr_opened == 0);
}
static void do_tests(void)
{
	/* Perform all the tests.
	 */
	open_primary();

	misc_ioctl();

	bad_read1();

	bad_read2();

	/* It is assumed that the driver implementation uses shared
	 * code paths for read and write for the basic checks, so we do
	 * not repeat those for writes.
	 */
	bad_write();

	vector_and_large();

	part_limits();

	unaligned_size();

	unaligned_pos1();

	unaligned_pos2();

	basic_sweep();

	high_pos();

	close_primary();
}
static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *UNUSED(info))
{
	/* Initialize the test driver: parse the given options, resolve the
	 * target driver, run the full test sequence, and print a summary.
	 */
	if (env_argc > 1)
		optset_parse(optset_table, env_argv[1]);

	if (driver_label[0] == '\0')
		panic("no driver label given");

	if (ds_retrieve_label_endpt(driver_label, &driver_endpt))
		panic("unable to resolve driver label");

	if (driver_minor > 255)
		panic("invalid or no driver minor given");

	srand48(getticks());

	output("BLOCKTEST: driver label '%s' (endpt %d), minor %d\n",
		driver_label, driver_endpt, driver_minor);

	do_tests();

	output("BLOCKTEST: summary: %d out of %d tests failed "
		"across %d group%s; %d driver deaths\n",
		failed_tests, total_tests, failed_groups,
		failed_groups == 1 ? "" : "s", driver_deaths);

	/* The returned code will determine the outcome of the RS call, and
	 * thus the outcome of the entire test. The actual error code does not
	 * matter.
	 */
	return (failed_tests) ? EINVAL : OK;
}
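/* A hypothetical example of how this test might be started, for illustration
 * only (the service utility syntax, binary path, and driver label below are
 * assumptions, not prescribed by this file). The quoted option string ends up
 * in env_argv[1] and is handed to optset_parse() above:
 *
 *   minix-service up /usr/sbin/blocktest -args "label=at_wini_0,minor=5,rw"
 *
 * The "label" option and a valid "minor" are mandatory, per the checks above.
 */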
static void sef_local_startup(void)
{
	/* Initialize the SEF framework.
	 */
	sef_setcb_init_fresh(sef_cb_init_fresh);

	sef_startup();
}
int main(int argc, char **argv)
{
	/* Driver task.
	 */
	env_setargs(argc, argv);
	sef_local_startup();

	return 0;
}