1 /* Block Device Driver Test driver, by D.C. van Moolenbroek */
3 #include <minix/blockdriver.h>
4 #include <minix/drvlib.h>
6 #include <minix/optset.h>
7 #include <sys/ioc_disk.h>
11 RESULT_OK
, /* exactly as expected */
12 RESULT_DEATH
, /* driver died */
13 RESULT_COMMFAIL
, /* communication failed */
14 RESULT_BADTYPE
, /* bad type in message */
15 RESULT_BADID
, /* bad request ID in message */
16 RESULT_BADSTATUS
, /* bad/unexpected status in message */
17 RESULT_TRUNC
, /* request truncated unexpectedly */
18 RESULT_CORRUPT
, /* buffer touched erroneously */
19 RESULT_MISSING
, /* buffer left untouched erroneously */
20 RESULT_OVERFLOW
, /* area around buffer touched */
21 RESULT_BADVALUE
/* bad/unexpected return value */
29 static char driver_label
[32] = ""; /* driver DS label */
30 static dev_t driver_minor
= -1; /* driver's partition minor to use */
31 static endpoint_t driver_endpt
; /* driver endpoint */
33 static int may_write
= FALSE
; /* may we write to the device? */
34 static int sector_size
= 512; /* size of a single disk sector */
35 static int min_read
= 512; /* minimum total size of read req */
36 static int element_size
= 512; /* minimum I/O vector element size */
37 static int max_size
= 131072; /* maximum total size of any req */
38 /* Note that we do not test exceeding the max_size limit, so it is safe to set
39 * it to a value lower than the driver supports.
42 static struct partition part
; /* base and size of target partition */
44 #define NR_OPENED 10 /* maximum number of opened devices */
45 static dev_t opened
[NR_OPENED
]; /* list of currently opened devices */
46 static int nr_opened
= 0; /* current number of opened devices */
48 static int total_tests
= 0; /* total number of tests performed */
49 static int failed_tests
= 0; /* number of tests that failed */
50 static int failed_groups
= 0; /* nr of groups that had failures */
51 static int group_failure
; /* has this group had a failure yet? */
52 static int driver_deaths
= 0; /* number of restarts that we saw */
54 /* Options supported by this driver. */
55 static struct optset optset_table
[] = {
56 { "label", OPT_STRING
, driver_label
, sizeof(driver_label
) },
57 { "minor", OPT_INT
, &driver_minor
, 10 },
58 { "rw", OPT_BOOL
, &may_write
, TRUE
},
59 { "ro", OPT_BOOL
, &may_write
, FALSE
},
60 { "sector", OPT_INT
, §or_size
, 10 },
61 { "element", OPT_INT
, &element_size
, 10 },
62 { "min_read", OPT_INT
, &min_read
, 10 },
63 { "max", OPT_INT
, &max_size
, 10 },
67 static int set_result(result_t
*res
, int type
, ssize_t value
)
69 /* Set the result to the given result type and with the given optional
70 * extra value. Return the type.
78 static int accept_result(result_t
*res
, int type
, ssize_t value
)
80 /* If the result is of the given type and value, reset it to a success
81 * result. This allows for a logical OR on error codes. Return whether
82 * the result was indeed reset.
85 if (res
->type
== type
&& res
->value
== value
) {
86 set_result(res
, RESULT_OK
, 0);
94 static void got_result(result_t
*res
, char *desc
)
96 /* Process the result of a test. Keep statistics.
101 if (res
->type
!= RESULT_OK
) {
104 if (group_failure
== FALSE
) {
106 group_failure
= TRUE
;
110 printf("#%02d: %-38s\t[%s]\n", ++i
, desc
,
111 (res
->type
== RESULT_OK
) ? "PASS" : "FAIL");
115 printf("- driver died\n");
117 case RESULT_COMMFAIL
:
118 printf("- communication failed; sendrec returned %d\n",
122 printf("- bad type %d in reply message\n", res
->value
);
125 printf("- mismatched ID %d in reply message\n", res
->value
);
127 case RESULT_BADSTATUS
:
128 printf("- bad or unexpected status %d in reply message\n",
132 printf("- result size not as expected (%u bytes left)\n",
136 printf("- buffer has been modified erroneously\n");
139 printf("- buffer has been left untouched erroneously\n");
141 case RESULT_OVERFLOW
:
142 printf("- area around target buffer modified\n");
144 case RESULT_BADVALUE
:
145 printf("- bad or unexpected return value %d from call\n",
151 static void test_group(char *name
, int exec
)
153 /* Start a new group of tests.
156 printf("Test group: %s%s\n", name
, exec
? "" : " (skipping)");
158 group_failure
= FALSE
;
161 static void reopen_device(dev_t minor
)
163 /* Reopen a device after we were notified that the driver has died.
164 * Explicitly ignore any errors here; this is a feeble attempt to get
165 * ourselves back into business again.
169 memset(&m
, 0, sizeof(m
));
170 m
.m_type
= BDEV_OPEN
;
171 m
.BDEV_MINOR
= minor
;
172 m
.BDEV_ACCESS
= (may_write
) ? (R_BIT
| W_BIT
) : R_BIT
;
175 (void) sendrec(driver_endpt
, &m
);
178 static int sendrec_driver(message
*m_ptr
, ssize_t exp
, result_t
*res
)
180 /* Make a call to the driver, and perform basic checks on the return
181 * message. Fill in the result structure, wiping out what was in there
182 * before. If the driver dies in the process, attempt to recover but
186 endpoint_t last_endpt
;
191 r
= sendrec(driver_endpt
, m_ptr
);
193 if (r
== EDEADSRCDST
) {
194 /* The driver has died. Find its new endpoint, and reopen all
195 * devices that we opened earlier. Then return failure.
197 printf("WARNING: driver has died, attempting to proceed\n");
201 /* Keep trying until we get a new endpoint. */
202 last_endpt
= driver_endpt
;
204 r
= ds_retrieve_label_endpt(driver_label
,
207 if (r
== OK
&& last_endpt
!= driver_endpt
)
213 for (i
= 0; i
< nr_opened
; i
++)
214 reopen_device(opened
[i
]);
216 return set_result(res
, RESULT_DEATH
, 0);
220 return set_result(res
, RESULT_COMMFAIL
, r
);
222 if (m_ptr
->m_type
!= BDEV_REPLY
)
223 return set_result(res
, RESULT_BADTYPE
, m_ptr
->m_type
);
225 if (m_ptr
->BDEV_ID
!= m_orig
.BDEV_ID
)
226 return set_result(res
, RESULT_BADID
, m_ptr
->BDEV_ID
);
228 if ((exp
< 0 && m_ptr
->BDEV_STATUS
>= 0) ||
229 (exp
>= 0 && m_ptr
->BDEV_STATUS
< 0))
230 return set_result(res
, RESULT_BADSTATUS
, m_ptr
->BDEV_STATUS
);
232 return set_result(res
, RESULT_OK
, 0);
235 static void raw_xfer(dev_t minor
, u64_t pos
, iovec_s_t
*iovec
, int nr_req
,
236 int write
, ssize_t exp
, result_t
*res
)
238 /* Perform a transfer with a safecopy iovec already supplied.
244 assert(nr_req
<= NR_IOREQS
);
245 assert(!write
|| may_write
);
247 if ((grant
= cpf_grant_direct(driver_endpt
, (vir_bytes
) iovec
,
248 sizeof(*iovec
) * nr_req
, CPF_READ
)) == GRANT_INVALID
)
249 panic("unable to allocate grant");
251 memset(&m
, 0, sizeof(m
));
252 m
.m_type
= write
? BDEV_SCATTER
: BDEV_GATHER
;
253 m
.BDEV_MINOR
= minor
;
254 m
.BDEV_POS_LO
= ex64lo(pos
);
255 m
.BDEV_POS_HI
= ex64hi(pos
);
256 m
.BDEV_COUNT
= nr_req
;
257 m
.BDEV_GRANT
= grant
;
258 m
.BDEV_ID
= lrand48();
260 r
= sendrec_driver(&m
, exp
, res
);
262 if (cpf_revoke(grant
) != OK
)
263 panic("unable to revoke grant");
268 if (m
.BDEV_STATUS
== exp
)
272 set_result(res
, RESULT_BADSTATUS
, m
.BDEV_STATUS
);
274 set_result(res
, RESULT_TRUNC
, exp
- m
.BDEV_STATUS
);
277 static void vir_xfer(dev_t minor
, u64_t pos
, iovec_t
*iovec
, int nr_req
,
278 int write
, ssize_t exp
, result_t
*res
)
280 /* Perform a transfer, creating and revoking grants for the I/O vector.
282 iovec_s_t iov_s
[NR_IOREQS
];
285 assert(nr_req
<= NR_IOREQS
);
287 for (i
= 0; i
< nr_req
; i
++) {
288 iov_s
[i
].iov_size
= iovec
[i
].iov_size
;
290 if ((iov_s
[i
].iov_grant
= cpf_grant_direct(driver_endpt
,
291 (vir_bytes
) iovec
[i
].iov_addr
, iovec
[i
].iov_size
,
292 write
? CPF_READ
: CPF_WRITE
)) == GRANT_INVALID
)
293 panic("unable to allocate grant");
296 raw_xfer(minor
, pos
, iov_s
, nr_req
, write
, exp
, res
);
298 for (i
= 0; i
< nr_req
; i
++) {
299 iovec
[i
].iov_size
= iov_s
[i
].iov_size
;
301 if (cpf_revoke(iov_s
[i
].iov_grant
) != OK
)
302 panic("unable to revoke grant");
306 static void simple_xfer(dev_t minor
, u64_t pos
, u8_t
*buf
, size_t size
,
307 int write
, ssize_t exp
, result_t
*res
)
309 /* Perform a transfer involving a single buffer.
313 iov
.iov_addr
= (vir_bytes
) buf
;
316 vir_xfer(minor
, pos
, &iov
, 1, write
, exp
, res
);
319 static void alloc_buf_and_grant(u8_t
**ptr
, cp_grant_id_t
*grant
,
320 size_t size
, int perms
)
322 /* Allocate a buffer suitable for DMA (i.e. contiguous) and create a
323 * grant for it with the requested CPF_* grant permissions.
326 *ptr
= alloc_contig(size
, 0, NULL
);
328 panic("unable to allocate memory");
330 if ((*grant
= cpf_grant_direct(driver_endpt
, (vir_bytes
) *ptr
, size
,
331 perms
)) == GRANT_INVALID
)
332 panic("unable to allocate grant");
335 static void free_buf_and_grant(u8_t
*ptr
, cp_grant_id_t grant
, size_t size
)
337 /* Revoke a grant and free a buffer.
342 free_contig(ptr
, size
);
345 static void bad_read1(void)
347 /* Test various illegal read transfer requests, part 1.
351 cp_grant_id_t grant
, grant2
, grant3
;
356 test_group("bad read requests, part one", TRUE
);
358 #define BUF_SIZE 4096
361 alloc_buf_and_grant(&buf_ptr
, &grant2
, buf_size
, CPF_WRITE
);
363 if ((grant
= cpf_grant_direct(driver_endpt
, (vir_bytes
) &iov
,
364 sizeof(iov
), CPF_READ
)) == GRANT_INVALID
)
365 panic("unable to allocate grant");
367 /* Initialize the defaults for some of the tests.
368 * This is a legitimate request for the first block of the partition.
370 memset(&mt
, 0, sizeof(mt
));
371 mt
.m_type
= BDEV_GATHER
;
372 mt
.BDEV_MINOR
= driver_minor
;
376 mt
.BDEV_GRANT
= grant
;
377 mt
.BDEV_ID
= lrand48();
379 memset(&iovt
, 0, sizeof(iovt
));
380 iovt
.iov_grant
= grant2
;
381 iovt
.iov_size
= buf_size
;
383 /* Test normal request. */
387 sendrec_driver(&m
, OK
, &res
);
389 if (res
.type
== RESULT_OK
&& m
.BDEV_STATUS
!= (ssize_t
) iov
.iov_size
) {
390 res
.type
= RESULT_TRUNC
;
391 res
.value
= m
.BDEV_STATUS
;
394 got_result(&res
, "normal request");
396 /* Test zero iovec elements. */
402 sendrec_driver(&m
, EINVAL
, &res
);
404 got_result(&res
, "zero iovec elements");
406 /* Test bad iovec grant. */
409 m
.BDEV_GRANT
= GRANT_INVALID
;
411 sendrec_driver(&m
, EINVAL
, &res
);
413 got_result(&res
, "bad iovec grant");
415 /* Test revoked iovec grant. */
419 if ((grant3
= cpf_grant_direct(driver_endpt
, (vir_bytes
) &iov
,
420 sizeof(iov
), CPF_READ
)) == GRANT_INVALID
)
421 panic("unable to allocate grant");
425 m
.BDEV_GRANT
= grant3
;
427 sendrec_driver(&m
, EINVAL
, &res
);
429 accept_result(&res
, RESULT_BADSTATUS
, EPERM
);
431 got_result(&res
, "revoked iovec grant");
433 /* Test normal request (final check). */
437 sendrec_driver(&m
, OK
, &res
);
439 if (res
.type
== RESULT_OK
&& m
.BDEV_STATUS
!= (ssize_t
) iov
.iov_size
) {
440 res
.type
= RESULT_TRUNC
;
441 res
.value
= m
.BDEV_STATUS
;
444 got_result(&res
, "normal request");
447 free_buf_and_grant(buf_ptr
, grant2
, buf_size
);
452 static u32_t
get_sum(u8_t
*ptr
, size_t size
)
454 /* Compute a checksum over the given buffer.
458 for (sum
= 0; size
> 0; size
--, ptr
++)
459 sum
= sum
^ (sum
<< 5) ^ *ptr
;
464 static u32_t
fill_rand(u8_t
*ptr
, size_t size
)
466 /* Fill the given buffer with random data. Return a checksum over the
471 for (i
= 0; i
< size
; i
++)
472 ptr
[i
] = lrand48() % 256;
474 return get_sum(ptr
, size
);
477 static void test_sum(u8_t
*ptr
, size_t size
, u32_t sum
, int should_match
,
480 /* If the test succeeded so far, check whether the given buffer does
481 * or does not match the given checksum, and adjust the test result
486 if (res
->type
!= RESULT_OK
)
489 sum2
= get_sum(ptr
, size
);
491 if ((sum
== sum2
) != should_match
) {
492 res
->type
= should_match
? RESULT_CORRUPT
: RESULT_MISSING
;
493 res
->value
= 0; /* not much that's useful here */
497 static void bad_read2(void)
499 /* Test various illegal read transfer requests, part 2.
501 * Consider allowing this test to be run twice, with different buffer
502 * sizes. It appears that we can make at_wini misbehave by making the
503 * size exceed the per-operation size (128KB ?). On the other hand, we
504 * then need to start checking partition sizes, possibly.
506 u8_t
*buf_ptr
, *buf2_ptr
, *buf3_ptr
, c1
, c2
;
507 size_t buf_size
, buf2_size
, buf3_size
;
508 cp_grant_id_t buf_grant
, buf2_grant
, buf3_grant
, grant
;
509 u32_t buf_sum
, buf2_sum
, buf3_sum
;
510 iovec_s_t iov
[3], iovt
[3];
513 test_group("bad read requests, part two", TRUE
);
515 buf_size
= buf2_size
= buf3_size
= BUF_SIZE
;
517 alloc_buf_and_grant(&buf_ptr
, &buf_grant
, buf_size
, CPF_WRITE
);
518 alloc_buf_and_grant(&buf2_ptr
, &buf2_grant
, buf2_size
, CPF_WRITE
);
519 alloc_buf_and_grant(&buf3_ptr
, &buf3_grant
, buf3_size
, CPF_WRITE
);
521 iovt
[0].iov_grant
= buf_grant
;
522 iovt
[0].iov_size
= buf_size
;
523 iovt
[1].iov_grant
= buf2_grant
;
524 iovt
[1].iov_size
= buf2_size
;
525 iovt
[2].iov_grant
= buf3_grant
;
526 iovt
[2].iov_size
= buf3_size
;
528 /* Test normal vector request. */
529 memcpy(iov
, iovt
, sizeof(iovt
));
531 buf_sum
= fill_rand(buf_ptr
, buf_size
);
532 buf2_sum
= fill_rand(buf2_ptr
, buf2_size
);
533 buf3_sum
= fill_rand(buf3_ptr
, buf3_size
);
535 raw_xfer(driver_minor
, cvu64(0), iov
, 3, FALSE
,
536 buf_size
+ buf2_size
+ buf3_size
, &res
);
538 test_sum(buf_ptr
, buf_size
, buf_sum
, FALSE
, &res
);
539 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, FALSE
, &res
);
540 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, FALSE
, &res
);
542 got_result(&res
, "normal vector request");
544 /* Test zero sized iovec element. */
545 memcpy(iov
, iovt
, sizeof(iovt
));
548 buf_sum
= fill_rand(buf_ptr
, buf_size
);
549 buf2_sum
= fill_rand(buf2_ptr
, buf2_size
);
550 buf3_sum
= fill_rand(buf3_ptr
, buf3_size
);
552 raw_xfer(driver_minor
, cvu64(0), iov
, 3, FALSE
, EINVAL
, &res
);
554 test_sum(buf_ptr
, buf_size
, buf_sum
, TRUE
, &res
);
555 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, TRUE
, &res
);
556 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, TRUE
, &res
);
558 got_result(&res
, "zero size in iovec element");
560 /* Test negative sized iovec element. */
561 memcpy(iov
, iovt
, sizeof(iovt
));
562 iov
[1].iov_size
= (vir_bytes
) LONG_MAX
+ 1;
564 raw_xfer(driver_minor
, cvu64(0), iov
, 3, FALSE
, EINVAL
, &res
);
566 test_sum(buf_ptr
, buf_size
, buf_sum
, TRUE
, &res
);
567 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, TRUE
, &res
);
568 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, TRUE
, &res
);
570 got_result(&res
, "negative size in iovec element");
572 /* Test iovec with negative total size. */
573 memcpy(iov
, iovt
, sizeof(iovt
));
574 iov
[0].iov_size
= LONG_MAX
/ 2 - 1;
575 iov
[1].iov_size
= LONG_MAX
/ 2 - 1;
577 raw_xfer(driver_minor
, cvu64(0), iov
, 3, FALSE
, EINVAL
, &res
);
579 test_sum(buf_ptr
, buf_size
, buf_sum
, TRUE
, &res
);
580 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, TRUE
, &res
);
581 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, TRUE
, &res
);
583 got_result(&res
, "negative total size");
585 /* Test iovec with wrapping total size. */
586 memcpy(iov
, iovt
, sizeof(iovt
));
587 iov
[0].iov_size
= LONG_MAX
- 1;
588 iov
[1].iov_size
= LONG_MAX
- 1;
590 raw_xfer(driver_minor
, cvu64(0), iov
, 3, FALSE
, EINVAL
, &res
);
592 test_sum(buf_ptr
, buf_size
, buf_sum
, TRUE
, &res
);
593 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, TRUE
, &res
);
594 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, TRUE
, &res
);
596 got_result(&res
, "wrapping total size");
598 /* Test word-unaligned iovec element size. */
599 memcpy(iov
, iovt
, sizeof(iovt
));
602 buf_sum
= fill_rand(buf_ptr
, buf_size
);
603 buf2_sum
= fill_rand(buf2_ptr
, buf2_size
);
604 buf3_sum
= fill_rand(buf3_ptr
, buf3_size
);
605 c1
= buf2_ptr
[buf2_size
- 1];
607 raw_xfer(driver_minor
, cvu64(0), iov
, 3, FALSE
, BUF_SIZE
* 3 - 1,
610 if (accept_result(&res
, RESULT_BADSTATUS
, EINVAL
)) {
611 /* Do not test the first buffer, as it may contain a partial
614 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, TRUE
, &res
);
615 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, TRUE
, &res
);
617 test_sum(buf_ptr
, buf_size
, buf_sum
, FALSE
, &res
);
618 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, FALSE
, &res
);
619 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, FALSE
, &res
);
620 if (c1
!= buf2_ptr
[buf2_size
- 1])
621 set_result(&res
, RESULT_CORRUPT
, 0);
624 got_result(&res
, "word-unaligned size in iovec element");
626 /* Test invalid grant in iovec element. */
627 memcpy(iov
, iovt
, sizeof(iovt
));
628 iov
[1].iov_grant
= GRANT_INVALID
;
630 fill_rand(buf_ptr
, buf_size
);
631 buf2_sum
= fill_rand(buf2_ptr
, buf2_size
);
632 buf3_sum
= fill_rand(buf3_ptr
, buf3_size
);
634 raw_xfer(driver_minor
, cvu64(0), iov
, 3, FALSE
, EINVAL
, &res
);
636 /* Do not test the first buffer, as it may contain a partial result. */
637 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, TRUE
, &res
);
638 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, TRUE
, &res
);
640 got_result(&res
, "invalid grant in iovec element");
642 /* Test revoked grant in iovec element. */
643 memcpy(iov
, iovt
, sizeof(iovt
));
644 if ((grant
= cpf_grant_direct(driver_endpt
, (vir_bytes
) buf2_ptr
,
645 buf2_size
, CPF_WRITE
)) == GRANT_INVALID
)
646 panic("unable to allocate grant");
650 iov
[1].iov_grant
= grant
;
652 buf_sum
= fill_rand(buf_ptr
, buf_size
);
653 buf2_sum
= fill_rand(buf2_ptr
, buf2_size
);
654 buf3_sum
= fill_rand(buf3_ptr
, buf3_size
);
656 raw_xfer(driver_minor
, cvu64(0), iov
, 3, FALSE
, EINVAL
, &res
);
658 accept_result(&res
, RESULT_BADSTATUS
, EPERM
);
660 /* Do not test the first buffer, as it may contain a partial result. */
661 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, TRUE
, &res
);
662 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, TRUE
, &res
);
664 got_result(&res
, "revoked grant in iovec element");
666 /* Test read-only grant in iovec element. */
667 memcpy(iov
, iovt
, sizeof(iovt
));
668 if ((grant
= cpf_grant_direct(driver_endpt
, (vir_bytes
) buf2_ptr
,
669 buf2_size
, CPF_READ
)) == GRANT_INVALID
)
670 panic("unable to allocate grant");
672 iov
[1].iov_grant
= grant
;
674 buf_sum
= fill_rand(buf_ptr
, buf_size
);
675 buf2_sum
= fill_rand(buf2_ptr
, buf2_size
);
676 buf3_sum
= fill_rand(buf3_ptr
, buf3_size
);
678 raw_xfer(driver_minor
, cvu64(0), iov
, 3, FALSE
, EINVAL
, &res
);
680 accept_result(&res
, RESULT_BADSTATUS
, EPERM
);
682 /* Do not test the first buffer, as it may contain a partial result. */
683 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, TRUE
, &res
);
684 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, TRUE
, &res
);
686 got_result(&res
, "read-only grant in iovec element");
690 /* Test word-unaligned iovec element buffer. */
691 memcpy(iov
, iovt
, sizeof(iovt
));
692 if ((grant
= cpf_grant_direct(driver_endpt
, (vir_bytes
) (buf2_ptr
+ 1),
693 buf2_size
- 2, CPF_WRITE
)) == GRANT_INVALID
)
694 panic("unable to allocate grant");
696 iov
[1].iov_grant
= grant
;
697 iov
[1].iov_size
= buf2_size
- 2;
699 buf_sum
= fill_rand(buf_ptr
, buf_size
);
700 buf2_sum
= fill_rand(buf2_ptr
, buf2_size
);
701 buf3_sum
= fill_rand(buf3_ptr
, buf3_size
);
703 c2
= buf2_ptr
[buf2_size
- 1];
705 raw_xfer(driver_minor
, cvu64(0), iov
, 3, FALSE
, BUF_SIZE
* 3 - 2,
708 if (accept_result(&res
, RESULT_BADSTATUS
, EINVAL
)) {
709 /* Do not test the first buffer, as it may contain a partial
712 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, TRUE
, &res
);
713 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, TRUE
, &res
);
715 test_sum(buf_ptr
, buf_size
, buf_sum
, FALSE
, &res
);
716 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, FALSE
, &res
);
717 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, FALSE
, &res
);
718 if (c1
!= buf2_ptr
[0] || c2
!= buf2_ptr
[buf2_size
- 1])
719 set_result(&res
, RESULT_CORRUPT
, 0);
722 got_result(&res
, "word-unaligned buffer in iovec element");
726 /* Test word-unaligned position. */
727 memcpy(iov
, iovt
, sizeof(iovt
));
729 buf_sum
= fill_rand(buf_ptr
, buf_size
);
730 buf2_sum
= fill_rand(buf2_ptr
, buf2_size
);
731 buf3_sum
= fill_rand(buf3_ptr
, buf3_size
);
733 raw_xfer(driver_minor
, cvu64(1), iov
, 3, FALSE
, EINVAL
, &res
);
735 test_sum(buf_ptr
, buf_size
, buf_sum
, TRUE
, &res
);
736 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, TRUE
, &res
);
737 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, TRUE
, &res
);
739 got_result(&res
, "word-unaligned position");
741 /* Test normal vector request (final check). */
742 memcpy(iov
, iovt
, sizeof(iovt
));
744 buf_sum
= fill_rand(buf_ptr
, buf_size
);
745 buf2_sum
= fill_rand(buf2_ptr
, buf2_size
);
746 buf3_sum
= fill_rand(buf3_ptr
, buf3_size
);
748 raw_xfer(driver_minor
, cvu64(0), iov
, 3, FALSE
,
749 buf_size
+ buf2_size
+ buf3_size
, &res
);
751 test_sum(buf_ptr
, buf_size
, buf_sum
, FALSE
, &res
);
752 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, FALSE
, &res
);
753 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, FALSE
, &res
);
755 got_result(&res
, "normal vector request");
758 free_buf_and_grant(buf3_ptr
, buf3_grant
, buf3_size
);
759 free_buf_and_grant(buf2_ptr
, buf2_grant
, buf2_size
);
760 free_buf_and_grant(buf_ptr
, buf_grant
, buf_size
);
763 #define SECTOR_UNALIGN 2 /* word-aligned and sector-unaligned */
765 static void bad_write(void)
767 /* Test various illegal write transfer requests, if writing is allowed.
768 * If handled correctly, these requests will not actually write data.
769 * However, the last test currently erroneously does end up writing.
771 u8_t
*buf_ptr
, *buf2_ptr
, *buf3_ptr
;
772 size_t buf_size
, buf2_size
, buf3_size
;
773 cp_grant_id_t buf_grant
, buf2_grant
, buf3_grant
;
775 u32_t buf_sum
, buf2_sum
, buf3_sum
;
776 iovec_s_t iov
[3], iovt
[3];
779 test_group("bad write requests", may_write
);
784 buf_size
= buf2_size
= buf3_size
= BUF_SIZE
;
786 alloc_buf_and_grant(&buf_ptr
, &buf_grant
, buf_size
, CPF_READ
);
787 alloc_buf_and_grant(&buf2_ptr
, &buf2_grant
, buf2_size
, CPF_READ
);
788 alloc_buf_and_grant(&buf3_ptr
, &buf3_grant
, buf3_size
, CPF_READ
);
790 iovt
[0].iov_grant
= buf_grant
;
791 iovt
[0].iov_size
= buf_size
;
792 iovt
[1].iov_grant
= buf2_grant
;
793 iovt
[1].iov_size
= buf2_size
;
794 iovt
[2].iov_grant
= buf3_grant
;
795 iovt
[2].iov_size
= buf3_size
;
797 /* Test sector-unaligned write position. */
798 memcpy(iov
, iovt
, sizeof(iovt
));
800 buf_sum
= fill_rand(buf_ptr
, buf_size
);
801 buf2_sum
= fill_rand(buf2_ptr
, buf2_size
);
802 buf3_sum
= fill_rand(buf3_ptr
, buf3_size
);
804 raw_xfer(driver_minor
, cvu64(SECTOR_UNALIGN
), iov
, 3, TRUE
, EINVAL
,
807 test_sum(buf_ptr
, buf_size
, buf_sum
, TRUE
, &res
);
808 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, TRUE
, &res
);
809 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, TRUE
, &res
);
811 got_result(&res
, "sector-unaligned write position");
813 /* Test sector-unaligned write size. */
814 memcpy(iov
, iovt
, sizeof(iovt
));
815 iov
[1].iov_size
-= SECTOR_UNALIGN
;
817 buf_sum
= fill_rand(buf_ptr
, buf_size
);
818 buf2_sum
= fill_rand(buf2_ptr
, buf2_size
);
819 buf3_sum
= fill_rand(buf3_ptr
, buf3_size
);
821 raw_xfer(driver_minor
, cvu64(0), iov
, 3, TRUE
, EINVAL
, &res
);
823 test_sum(buf_ptr
, buf_size
, buf_sum
, TRUE
, &res
);
824 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, TRUE
, &res
);
825 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, TRUE
, &res
);
827 got_result(&res
, "sector-unaligned write size");
829 /* Test write-only grant in iovec element. */
830 memcpy(iov
, iovt
, sizeof(iovt
));
831 if ((grant
= cpf_grant_direct(driver_endpt
, (vir_bytes
) buf2_ptr
,
832 buf2_size
, CPF_WRITE
)) == GRANT_INVALID
)
833 panic("unable to allocate grant");
835 iov
[1].iov_grant
= grant
;
837 buf_sum
= fill_rand(buf_ptr
, buf_size
);
838 buf2_sum
= fill_rand(buf2_ptr
, buf2_size
);
839 buf3_sum
= fill_rand(buf3_ptr
, buf3_size
);
841 raw_xfer(driver_minor
, cvu64(0), iov
, 3, TRUE
, EINVAL
, &res
);
843 accept_result(&res
, RESULT_BADSTATUS
, EPERM
);
845 test_sum(buf_ptr
, buf_size
, buf_sum
, TRUE
, &res
);
846 test_sum(buf2_ptr
, buf2_size
, buf2_sum
, TRUE
, &res
);
847 test_sum(buf3_ptr
, buf3_size
, buf3_sum
, TRUE
, &res
);
849 got_result(&res
, "write-only grant in iovec element");
854 free_buf_and_grant(buf3_ptr
, buf3_grant
, buf3_size
);
855 free_buf_and_grant(buf2_ptr
, buf2_grant
, buf2_size
);
856 free_buf_and_grant(buf_ptr
, buf_grant
, buf_size
);
859 static void vector_and_large_sub(size_t small_size
)
861 /* Check whether large vectored requests, and large single requests,
864 size_t large_size
, buf_size
, buf2_size
;
865 u8_t
*buf_ptr
, *buf2_ptr
;
866 iovec_t iovec
[NR_IOREQS
];
871 base_pos
= cvu64(sector_size
);
873 large_size
= small_size
* NR_IOREQS
;
875 buf_size
= large_size
+ sizeof(u32_t
) * 2;
876 buf2_size
= large_size
+ sizeof(u32_t
) * (NR_IOREQS
+ 1);
878 buf_ptr
= alloc_contig(buf_size
, 0, NULL
);
879 buf2_ptr
= alloc_contig(buf2_size
, 0, NULL
);
880 if (buf_ptr
== NULL
|| buf2_ptr
== NULL
)
881 panic("unable to allocate memory");
883 /* The first buffer has one large chunk with dword-sized guards on each
884 * side. LPTR(n) points to the start of the nth small data chunk within
885 * the large chunk. The second buffer contains several small chunks. It
886 * has dword-sized guards before each chunk and after the last chunk.
887 * SPTR(n) points to the start of the nth small chunk.
889 #define SPTR(n) (buf2_ptr + sizeof(u32_t) + (n) * (sizeof(u32_t) + small_size))
890 #define LPTR(n) (buf_ptr + sizeof(u32_t) + small_size * (n))
892 /* Write one large chunk, if writing is allowed. */
894 fill_rand(buf_ptr
, buf_size
); /* don't need the checksum */
896 iovec
[0].iov_addr
= (vir_bytes
) (buf_ptr
+ sizeof(u32_t
));
897 iovec
[0].iov_size
= large_size
;
899 vir_xfer(driver_minor
, base_pos
, iovec
, 1, TRUE
, large_size
,
902 got_result(&res
, "large write");
905 /* Read back in many small chunks. If writing is not allowed, do not
908 for (i
= 0; i
< NR_IOREQS
; i
++) {
909 * (((u32_t
*) SPTR(i
)) - 1) = 0xDEADBEEFL
+ i
;
910 iovec
[i
].iov_addr
= (vir_bytes
) SPTR(i
);
911 iovec
[i
].iov_size
= small_size
;
913 * (((u32_t
*) SPTR(i
)) - 1) = 0xFEEDFACEL
;
915 vir_xfer(driver_minor
, base_pos
, iovec
, NR_IOREQS
, FALSE
, large_size
,
918 if (res
.type
== RESULT_OK
) {
919 for (i
= 0; i
< NR_IOREQS
; i
++) {
920 if (* (((u32_t
*) SPTR(i
)) - 1) != 0xDEADBEEFL
+ i
)
921 set_result(&res
, RESULT_OVERFLOW
, 0);
923 if (* (((u32_t
*) SPTR(i
)) - 1) != 0xFEEDFACEL
)
924 set_result(&res
, RESULT_OVERFLOW
, 0);
927 if (res
.type
== RESULT_OK
&& may_write
) {
928 for (i
= 0; i
< NR_IOREQS
; i
++) {
929 test_sum(SPTR(i
), small_size
,
930 get_sum(LPTR(i
), small_size
), TRUE
, &res
);
934 got_result(&res
, "vectored read");
936 /* Write new data in many small chunks, if writing is allowed. */
938 fill_rand(buf2_ptr
, buf2_size
); /* don't need the checksum */
940 for (i
= 0; i
< NR_IOREQS
; i
++) {
941 iovec
[i
].iov_addr
= (vir_bytes
) SPTR(i
);
942 iovec
[i
].iov_size
= small_size
;
945 vir_xfer(driver_minor
, base_pos
, iovec
, NR_IOREQS
, TRUE
,
948 got_result(&res
, "vectored write");
951 /* Read back in one large chunk. If writing is allowed, the checksums
952 * must match the last write; otherwise, they must match the last read.
953 * In both cases, the expected content is in the second buffer.
956 * (u32_t
*) buf_ptr
= 0xCAFEBABEL
;
957 * (u32_t
*) (buf_ptr
+ sizeof(u32_t
) + large_size
) = 0xDECAFBADL
;
959 iovec
[0].iov_addr
= (vir_bytes
) (buf_ptr
+ sizeof(u32_t
));
960 iovec
[0].iov_size
= large_size
;
962 vir_xfer(driver_minor
, base_pos
, iovec
, 1, FALSE
, large_size
, &res
);
964 if (res
.type
== RESULT_OK
) {
965 if (* (u32_t
*) buf_ptr
!= 0xCAFEBABEL
)
966 set_result(&res
, RESULT_OVERFLOW
, 0);
967 if (* (u32_t
*) (buf_ptr
+ sizeof(u32_t
) + large_size
) !=
969 set_result(&res
, RESULT_OVERFLOW
, 0);
972 if (res
.type
== RESULT_OK
) {
973 for (i
= 0; i
< NR_IOREQS
; i
++) {
974 test_sum(SPTR(i
), small_size
,
975 get_sum(LPTR(i
), small_size
), TRUE
, &res
);
979 got_result(&res
, "large read");
985 free_contig(buf2_ptr
, buf2_size
);
986 free_contig(buf_ptr
, buf_size
);
989 static void vector_and_large(void)
991 /* Check whether large vectored requests, and large single requests,
992 * succeed. These are request patterns commonly used by MFS and the
993 * filter driver, respectively. We try the same test twice: once with
994 * a common block size, and once to push against the max request size.
998 /* Compute the largest sector multiple which, when multiplied by
999 * NR_IOREQS, is no more than the maximum transfer size. Note that if
1000 * max_size is not a multiple of sector_size, we're not going up to the
1001 * limit entirely this way.
1003 max_block
= max_size
/ NR_IOREQS
;
1004 max_block
-= max_block
% sector_size
;
1006 #define COMMON_BLOCK_SIZE 4096
1008 test_group("vector and large, common block", TRUE
);
1010 vector_and_large_sub(COMMON_BLOCK_SIZE
);
1012 if (max_block
!= COMMON_BLOCK_SIZE
) {
1013 test_group("vector and large, large block", TRUE
);
1015 vector_and_large_sub(max_block
);
1019 static void open_device(dev_t minor
)
1021 /* Open a partition or subpartition. Remember that it has been opened,
1022 * so that we can reopen it later in the event of a driver crash.
1027 memset(&m
, 0, sizeof(m
));
1028 m
.m_type
= BDEV_OPEN
;
1029 m
.BDEV_MINOR
= minor
;
1030 m
.BDEV_ACCESS
= may_write
? (R_BIT
| W_BIT
) : R_BIT
;
1031 m
.BDEV_ID
= lrand48();
1033 sendrec_driver(&m
, OK
, &res
);
1035 /* We assume that this call is supposed to succeed. We pretend it
1036 * always succeeds, so that close_device() won't get confused later.
1038 assert(nr_opened
< NR_OPENED
);
1039 opened
[nr_opened
++] = minor
;
1041 got_result(&res
, minor
== driver_minor
? "opening the main partition" :
1042 "opening a subpartition");
1045 static void close_device(dev_t minor
)
1047 /* Close a partition or subpartition. Remove it from the list of opened
1054 memset(&m
, 0, sizeof(m
));
1055 m
.m_type
= BDEV_CLOSE
;
1056 m
.BDEV_MINOR
= minor
;
1057 m
.BDEV_ID
= lrand48();
1059 sendrec_driver(&m
, OK
, &res
);
1061 assert(nr_opened
> 0);
1062 for (i
= 0; i
< nr_opened
; i
++) {
1063 if (opened
[i
] == minor
) {
1064 opened
[i
] = opened
[--nr_opened
];
1069 got_result(&res
, minor
== driver_minor
? "closing the main partition" :
1070 "closing a subpartition");
1073 static int vir_ioctl(dev_t minor
, int req
, void *ptr
, ssize_t exp
,
1076 /* Perform an I/O control request, using a local buffer.
1078 cp_grant_id_t grant
;
1082 assert(!_MINIX_IOCTL_BIG(req
)); /* not supported */
1085 if (_MINIX_IOCTL_IOR(req
)) perm
|= CPF_WRITE
;
1086 if (_MINIX_IOCTL_IOW(req
)) perm
|= CPF_READ
;
1088 if ((grant
= cpf_grant_direct(driver_endpt
, (vir_bytes
) ptr
,
1089 _MINIX_IOCTL_SIZE(req
), perm
)) == GRANT_INVALID
)
1090 panic("unable to allocate grant");
1092 memset(&m
, 0, sizeof(m
));
1093 m
.m_type
= BDEV_IOCTL
;
1094 m
.BDEV_MINOR
= minor
;
1097 m
.BDEV_REQUEST
= req
;
1098 m
.BDEV_GRANT
= grant
;
1099 m
.BDEV_ID
= lrand48();
1101 r
= sendrec_driver(&m
, exp
, res
);
1103 if (cpf_revoke(grant
) != OK
)
1104 panic("unable to revoke grant");
static void misc_ioctl(void)
{
	/* Test some ioctls: retrieve the target partition's geometry (saved in
	 * the global 'part' for later tests), and exercise DIOCOPENCT by
	 * checking that the driver's open count tracks our open/close calls.
	 */
	result_t res;
	int openct;

	test_group("test miscellaneous ioctls", TRUE);

	/* Retrieve the main partition's base and size. Save for later. */
	vir_ioctl(driver_minor, DIOCGETP, &part, OK, &res);

	got_result(&res, "ioctl to get partition");

	/* The other tests do not check whether there is sufficient room. */
	if (res.type == RESULT_OK && cmp64u(part.size, max_size * 2) < 0)
		printf("WARNING: small partition, some tests may fail\n");

	/* Test retrieving global driver open count. The magic initial value
	 * makes it detectable if the driver fails to fill in the result.
	 */
	openct = 0x0badcafe;

	vir_ioctl(driver_minor, DIOCOPENCT, &openct, OK, &res);

	/* We assume that we're the only client to the driver right now. */
	if (res.type == RESULT_OK && openct != 1) {
		res.type = RESULT_BADVALUE;
		res.value = openct;
	}

	got_result(&res, "ioctl to get open count");

	/* Test increasing and re-retrieving open count. */
	open_device(driver_minor);

	openct = 0x0badcafe;

	vir_ioctl(driver_minor, DIOCOPENCT, &openct, OK, &res);

	if (res.type == RESULT_OK && openct != 2) {
		res.type = RESULT_BADVALUE;
		res.value = openct;
	}

	got_result(&res, "increased open count after opening");

	/* Test decreasing and re-retrieving open count. */
	close_device(driver_minor);

	openct = 0x0badcafe;

	vir_ioctl(driver_minor, DIOCOPENCT, &openct, OK, &res);

	if (res.type == RESULT_OK && openct != 1) {
		res.type = RESULT_BADVALUE;
		res.value = openct;
	}

	got_result(&res, "decreased open count after closing");
}
static void read_limits(dev_t sub0_minor, dev_t sub1_minor, size_t sub_size)
{
	/* Test reads up to, across, and beyond partition limits. The first
	 * subpartition (sub0_minor) is the target of the boundary reads;
	 * sub1_minor is used only for the negative-offset test at the end.
	 * sub_size is the size of each subpartition, in bytes.
	 */
	u8_t *buf_ptr;
	size_t buf_size;
	u32_t sum, sum2, sum3;
	result_t res;

	test_group("read around subpartition limits", TRUE);

	buf_size = sector_size * 3;

	if ((buf_ptr = alloc_contig(buf_size, 0, NULL)) == NULL)
		panic("unable to allocate memory");

	/* Read one sector up to the partition limit. */
	fill_rand(buf_ptr, buf_size);

	simple_xfer(sub0_minor, cvu64(sub_size - sector_size), buf_ptr,
		sector_size, FALSE, sector_size, &res);

	/* Remember the checksum of the partition's last sector, so that later
	 * reads of the same sector can be verified against it.
	 */
	sum = get_sum(buf_ptr, sector_size);

	got_result(&res, "one sector read up to partition end");

	/* Read three sectors up to the partition limit. */
	fill_rand(buf_ptr, buf_size);

	simple_xfer(sub0_minor, cvu64(sub_size - buf_size), buf_ptr, buf_size,
		FALSE, buf_size, &res);

	/* The last of the three sectors read must match the earlier read. */
	test_sum(buf_ptr + sector_size * 2, sector_size, sum, TRUE, &res);

	sum2 = get_sum(buf_ptr + sector_size, sector_size * 2);

	got_result(&res, "multisector read up to partition end");

	/* Read three sectors, two up to and one beyond the partition end. The
	 * sector beyond the end must be left untouched (sum3).
	 */
	fill_rand(buf_ptr, buf_size);
	sum3 = get_sum(buf_ptr + sector_size * 2, sector_size);

	simple_xfer(sub0_minor, cvu64(sub_size - sector_size * 2), buf_ptr,
		buf_size, FALSE, sector_size * 2, &res);

	test_sum(buf_ptr, sector_size * 2, sum2, TRUE, &res);
	test_sum(buf_ptr + sector_size * 2, sector_size, sum3, TRUE, &res);

	got_result(&res, "read somewhat across partition end");

	/* Read three sectors, one up to and two beyond the partition end. */
	fill_rand(buf_ptr, buf_size);
	sum2 = get_sum(buf_ptr + sector_size, sector_size * 2);

	simple_xfer(sub0_minor, cvu64(sub_size - sector_size), buf_ptr,
		buf_size, FALSE, sector_size, &res);

	test_sum(buf_ptr, sector_size, sum, TRUE, &res);
	test_sum(buf_ptr + sector_size, sector_size * 2, sum2, TRUE, &res);

	got_result(&res, "read mostly across partition end");

	/* Read one sector starting at the partition end. This must transfer
	 * zero bytes (EOF), leaving the buffer untouched.
	 */
	sum = fill_rand(buf_ptr, buf_size);
	sum2 = get_sum(buf_ptr, sector_size);

	simple_xfer(sub0_minor, cvu64(sub_size), buf_ptr, sector_size, FALSE,
		0, &res);

	test_sum(buf_ptr, sector_size, sum2, TRUE, &res);

	got_result(&res, "one sector read at partition end");

	/* Read three sectors starting at the partition end. */
	simple_xfer(sub0_minor, cvu64(sub_size), buf_ptr, buf_size, FALSE, 0,
		&res);

	test_sum(buf_ptr, buf_size, sum, TRUE, &res);

	got_result(&res, "multisector read at partition end");

	/* Read one sector beyond the partition end. */
	simple_xfer(sub0_minor, cvu64(sub_size + sector_size), buf_ptr,
		buf_size, FALSE, 0, &res);

	test_sum(buf_ptr, sector_size, sum2, TRUE, &res);

	got_result(&res, "single sector read beyond partition end");

	/* Read three sectors way beyond the partition end. */
	simple_xfer(sub0_minor, make64(0L, 0x10000000L), buf_ptr,
		buf_size, FALSE, 0, &res);

	test_sum(buf_ptr, buf_size, sum, TRUE, &res);
	got_result(&res, "multisector read way beyond partition end");

	/* Test negative offsets. This request should return EOF or fail; we
	 * assume that it return EOF here (because that is what the AHCI driver
	 * does, to avoid producing errors for requests close to the 2^64 byte
	 * position limit [yes, this will indeed never happen anyway]). This is
	 * more or less a bad requests test, but we cannot do it without
	 * setting up subpartitions first.
	 */
	simple_xfer(sub1_minor, make64(0xffffffffL - sector_size + 1,
		0xffffffffL), buf_ptr, sector_size, FALSE, 0, &res);

	test_sum(buf_ptr, sector_size, sum2, TRUE, &res);

	got_result(&res, "read with negative offset");

	/* Clean up. */
	free_contig(buf_ptr, buf_size);
}
static void write_limits(dev_t sub0_minor, dev_t sub1_minor, size_t sub_size)
{
	/* Test writes up to, across, and beyond partition limits. Use the
	 * first given subpartition to test, and the second to make sure there
	 * are no overruns. The given size is the size of each of the
	 * subpartitions. Note that the necessity to check the results using
	 * readback, makes this more or less a superset of the read test.
	 */
	u8_t *buf_ptr;
	size_t buf_size;
	u32_t sum, sum2, sum3, sub1_sum;
	result_t res;

	test_group("write around subpartition limits", may_write);

	/* This is a write-only test group; skip it in read-only mode. */
	if (!may_write)
		return;

	buf_size = sector_size * 3;

	if ((buf_ptr = alloc_contig(buf_size, 0, NULL)) == NULL)
		panic("unable to allocate memory");

	/* Write to the start of the second subpartition, so that we can
	 * reliably check whether the contents have changed later.
	 */
	sub1_sum = fill_rand(buf_ptr, buf_size);

	simple_xfer(sub1_minor, cvu64(0), buf_ptr, buf_size, TRUE, buf_size,
		&res);

	got_result(&res, "write to second subpartition");

	/* Write one sector, up to the partition limit. */
	sum = fill_rand(buf_ptr, sector_size);

	simple_xfer(sub0_minor, cvu64(sub_size - sector_size), buf_ptr,
		sector_size, TRUE, sector_size, &res);

	got_result(&res, "write up to partition end");

	/* Read back to make sure the results have persisted. */
	fill_rand(buf_ptr, sector_size * 2);

	simple_xfer(sub0_minor, cvu64(sub_size - sector_size * 2), buf_ptr,
		sector_size * 2, FALSE, sector_size * 2, &res);

	test_sum(buf_ptr + sector_size, sector_size, sum, TRUE, &res);

	got_result(&res, "read up to partition end");

	/* Write three sectors, two up to and one beyond the partition end. */
	fill_rand(buf_ptr, buf_size);
	sum = get_sum(buf_ptr + sector_size, sector_size);
	sum3 = get_sum(buf_ptr, sector_size);

	simple_xfer(sub0_minor, cvu64(sub_size - sector_size * 2), buf_ptr,
		buf_size, TRUE, sector_size * 2, &res);

	got_result(&res, "write somewhat across partition end");

	/* Read three sectors, one up to and two beyond the partition end. */
	fill_rand(buf_ptr, buf_size);
	sum2 = get_sum(buf_ptr + sector_size, sector_size * 2);

	simple_xfer(sub0_minor, cvu64(sub_size - sector_size), buf_ptr,
		buf_size, FALSE, sector_size, &res);

	test_sum(buf_ptr, sector_size, sum, TRUE, &res);
	test_sum(buf_ptr + sector_size, sector_size * 2, sum2, TRUE, &res);

	got_result(&res, "read mostly across partition end");

	/* Repeat this but with write and read start positions swapped. */
	fill_rand(buf_ptr, buf_size);
	sum = get_sum(buf_ptr, sector_size);

	simple_xfer(sub0_minor, cvu64(sub_size - sector_size), buf_ptr,
		buf_size, TRUE, sector_size, &res);

	got_result(&res, "write mostly across partition end");

	fill_rand(buf_ptr, buf_size);
	sum2 = get_sum(buf_ptr + sector_size * 2, sector_size);

	simple_xfer(sub0_minor, cvu64(sub_size - sector_size * 2), buf_ptr,
		buf_size, FALSE, sector_size * 2, &res);

	/* The first sector still holds the earlier "somewhat across" write
	 * (sum3); the second holds the last "mostly across" write (sum); the
	 * third, beyond the end, must be untouched (sum2).
	 */
	test_sum(buf_ptr, sector_size, sum3, TRUE, &res);
	test_sum(buf_ptr + sector_size, sector_size, sum, TRUE, &res);
	test_sum(buf_ptr + sector_size * 2, sector_size, sum2, TRUE, &res);

	got_result(&res, "read somewhat across partition end");

	/* Write one sector at the end of the partition. This must transfer
	 * zero bytes.
	 */
	fill_rand(buf_ptr, sector_size);

	simple_xfer(sub0_minor, cvu64(sub_size), buf_ptr, sector_size, TRUE, 0,
		&res);

	got_result(&res, "write at partition end");

	/* Write one sector beyond the end of the partition. */
	simple_xfer(sub0_minor, cvu64(sub_size + sector_size), buf_ptr,
		sector_size, TRUE, 0, &res);

	got_result(&res, "write beyond partition end");

	/* Read from the start of the second subpartition, and see if it
	 * matches what we wrote into it earlier.
	 */
	fill_rand(buf_ptr, buf_size);

	simple_xfer(sub1_minor, cvu64(0), buf_ptr, buf_size, FALSE, buf_size,
		&res);

	test_sum(buf_ptr, buf_size, sub1_sum, TRUE, &res);

	got_result(&res, "read from second subpartition");

	/* Test offset wrapping, but this time for writes. */
	fill_rand(buf_ptr, sector_size);

	simple_xfer(sub1_minor, make64(0xffffffffL - sector_size + 1,
		0xffffffffL), buf_ptr, sector_size, TRUE, 0, &res);

	got_result(&res, "write with negative offset");

	/* If the last request erroneously succeeded, it would have overwritten
	 * the last sector of the first subpartition.
	 */
	simple_xfer(sub0_minor, cvu64(sub_size - sector_size), buf_ptr,
		sector_size, FALSE, sector_size, &res);

	test_sum(buf_ptr, sector_size, sum, TRUE, &res);

	got_result(&res, "read up to partition end");

	/* Clean up. */
	free_contig(buf_ptr, buf_size);
}
static void vir_limits(dev_t sub0_minor, dev_t sub1_minor, int part_secs)
{
	/* Create virtual, temporary subpartitions through the DIOCSETP ioctl,
	 * and perform tests on the resulting subpartitions. Each subpartition
	 * is part_secs sectors in size; the two are laid out back to back at
	 * the start of the target partition.
	 */
	struct partition subpart, subpart2;
	size_t sub_size;
	result_t res;

	test_group("virtual subpartition limits", TRUE);

	/* Open the subpartitions. This is somewhat dodgy; we rely on the
	 * driver allowing this even if no subpartitions exist. We cannot do
	 * this test without doing a DIOCSETP on an open subdevice, though.
	 */
	open_device(sub0_minor);
	open_device(sub1_minor);

	sub_size = sector_size * part_secs;

	/* Set, and check, the size of the first subpartition. */
	subpart = part;
	subpart.size = cvu64(sub_size);

	vir_ioctl(sub0_minor, DIOCSETP, &subpart, OK, &res);

	got_result(&res, "ioctl to set first subpartition");

	vir_ioctl(sub0_minor, DIOCGETP, &subpart2, OK, &res);

	if (res.type == RESULT_OK && (cmp64(subpart.base, subpart2.base) ||
			cmp64(subpart.size, subpart2.size))) {
		res.type = RESULT_BADVALUE;
		res.value = 0;
	}

	got_result(&res, "ioctl to get first subpartition");

	/* Set, and check, the base and size of the second subpartition. */
	subpart = part;
	subpart.base = add64u(subpart.base, sub_size);
	subpart.size = cvu64(sub_size);

	vir_ioctl(sub1_minor, DIOCSETP, &subpart, OK, &res);

	got_result(&res, "ioctl to set second subpartition");

	vir_ioctl(sub1_minor, DIOCGETP, &subpart2, OK, &res);

	if (res.type == RESULT_OK && (cmp64(subpart.base, subpart2.base) ||
			cmp64(subpart.size, subpart2.size))) {
		res.type = RESULT_BADVALUE;
		res.value = 0;
	}

	got_result(&res, "ioctl to get second subpartition");

	/* Perform the actual I/O tests. */
	read_limits(sub0_minor, sub1_minor, sub_size);

	write_limits(sub0_minor, sub1_minor, sub_size);

	/* Clean up. */
	close_device(sub1_minor);
	close_device(sub0_minor);
}
static void real_limits(dev_t sub0_minor, dev_t sub1_minor, int part_secs)
{
	/* Create our own subpartitions by writing a partition table, and
	 * perform tests on the resulting real subpartitions. Requires write
	 * access; each created subpartition is part_secs sectors in size.
	 */
	u8_t *buf_ptr;
	size_t buf_size, sub_size;
	struct partition subpart;
	struct part_entry *entry;
	result_t res;

	test_group("real subpartition limits", may_write);

	/* This test overwrites on-disk data; skip it in read-only mode. */
	if (!may_write)
		return;

	sub_size = sector_size * part_secs;

	/* Technically, we should be using 512 instead of sector_size in
	 * various places, because even on CD-ROMs, the partition tables are
	 * 512 bytes and the sector counts are based on 512-byte sectors in it.
	 * We ignore this subtlety because CD-ROMs are assumed to be read-only
	 * anyway.
	 */
	buf_size = sector_size;

	if ((buf_ptr = alloc_contig(buf_size, 0, NULL)) == NULL)
		panic("unable to allocate memory");

	memset(buf_ptr, 0, buf_size);

	/* Write an invalid partition table. */
	simple_xfer(driver_minor, cvu64(0), buf_ptr, buf_size, TRUE, buf_size,
		&res);

	got_result(&res, "write of invalid partition table");

	/* Get the disk driver to reread the partition table. This should
	 * happen (at least) when the device is fully closed and then reopened.
	 * The ioctl test already made sure that we're the only client.
	 */
	close_device(driver_minor);
	open_device(driver_minor);

	/* See if our changes are visible. We expect the subpartitions to have
	 * a size of zero now, indicating that they're not there. For actual
	 * subpartitions (as opposed to normal partitions), this requires the
	 * driver to zero them out, because the partition code does not do so.
	 */
	open_device(sub0_minor);
	open_device(sub1_minor);

	vir_ioctl(sub0_minor, DIOCGETP, &subpart, 0, &res);

	if (res.type == RESULT_OK && cmp64u(subpart.size, 0)) {
		res.type = RESULT_BADVALUE;
		res.value = ex64lo(subpart.size);
	}

	got_result(&res, "ioctl to get first subpartition");

	vir_ioctl(sub1_minor, DIOCGETP, &subpart, 0, &res);

	if (res.type == RESULT_OK && cmp64u(subpart.size, 0)) {
		res.type = RESULT_BADVALUE;
		res.value = ex64lo(subpart.size);
	}

	got_result(&res, "ioctl to get second subpartition");

	close_device(sub1_minor);
	close_device(sub0_minor);

	/* Now write a valid partition table. */
	memset(buf_ptr, 0, buf_size);

	entry = (struct part_entry *) &buf_ptr[PART_TABLE_OFF];

	/* The first subpartition starts one sector into the target partition
	 * (leaving room for the table itself); the second follows the first.
	 */
	entry[0].sysind = MINIX_PART;
	entry[0].lowsec = div64u(part.base, sector_size) + 1;
	entry[0].size = part_secs;
	entry[1].sysind = MINIX_PART;
	entry[1].lowsec = entry[0].lowsec + entry[0].size;
	entry[1].size = part_secs;

	/* Standard MBR boot signature, required for the table to be valid. */
	buf_ptr[510] = 0x55;
	buf_ptr[511] = 0xAA;

	simple_xfer(driver_minor, cvu64(0), buf_ptr, buf_size, TRUE, buf_size,
		&res);

	got_result(&res, "write of valid partition table");

	/* Same as above. */
	close_device(driver_minor);
	open_device(driver_minor);

	/* Again, see if our changes are visible. This time the proper base and
	 * size should be there.
	 */
	open_device(sub0_minor);
	open_device(sub1_minor);

	vir_ioctl(sub0_minor, DIOCGETP, &subpart, 0, &res);

	if (res.type == RESULT_OK && (cmp64(subpart.base,
			add64u(part.base, sector_size)) ||
			cmp64u(subpart.size, part_secs * sector_size))) {

		res.type = RESULT_BADVALUE;
		res.value = 0;
	}

	got_result(&res, "ioctl to get first subpartition");

	vir_ioctl(sub1_minor, DIOCGETP, &subpart, 0, &res);

	if (res.type == RESULT_OK && (cmp64(subpart.base,
			add64u(part.base, (1 + part_secs) * sector_size)) ||
			cmp64u(subpart.size, part_secs * sector_size))) {

		res.type = RESULT_BADVALUE;
		res.value = 0;
	}

	got_result(&res, "ioctl to get second subpartition");

	/* Now perform the actual I/O tests. */
	read_limits(sub0_minor, sub1_minor, sub_size);

	write_limits(sub0_minor, sub1_minor, sub_size);

	/* Clean up. */
	close_device(sub0_minor);
	close_device(sub1_minor);

	free_contig(buf_ptr, buf_size);
}
static void part_limits(void)
{
	/* Test reads and writes up to, across, and beyond partition limits.
	 * As a side effect, test reading and writing partition sizes and
	 * rereading partition tables.
	 */
	dev_t par, sub0_minor, sub1_minor;

	/* First determine the first two subpartitions of the partition that we
	 * are operating on. If we are already operating on a subpartition, we
	 * cannot conduct this test.
	 */
	if (driver_minor >= MINOR_d0p0s0) {
		printf("WARNING: operating on subpartition, "
			"skipping partition tests\n");
		return;
	}

	par = driver_minor % DEV_PER_DRIVE;
	if (par > 0) /* adapted from libdriver's drvlib code */
		sub0_minor = MINOR_d0p0s0 + ((driver_minor / DEV_PER_DRIVE) *
			NR_PARTITIONS + par - 1) * NR_PARTITIONS;
	else
		sub0_minor = driver_minor + 1;
	sub1_minor = sub0_minor + 1;

#define PART_SECS	9	/* sectors in each partition. must be >= 4. */

	/* First try the test with temporarily specified subpartitions. */
	vir_limits(sub0_minor, sub1_minor, PART_SECS);

	/* Then, if we're allowed to write, try the test with real, persisted
	 * subpartitions. Use one sector less so that the results of the two
	 * runs cannot be confused.
	 */
	real_limits(sub0_minor, sub1_minor, PART_SECS - 1);
}
static void unaligned_size_io(u64_t base_pos, u8_t *buf_ptr, size_t buf_size,
	u8_t *sec_ptr[2], int sectors, int pattern, u32_t ssum[5])
{
	/* Perform a single small-element I/O read, write, readback test.
	 * The number of sectors and the pattern varies with each call.
	 * The ssum array has to be updated to reflect the five sectors'
	 * checksums on disk, if writing is enabled. Note that for the write
	 * phase, the ssum entries of the affected sectors are refreshed here.
	 */
	iovec_t iov[3], iovt[3];
	u32_t rsum[3];
	size_t total_size;
	result_t res;
	int i, nr_req;

	/* Skip the first sector of the five-sector area; the caller checks
	 * that it stays untouched.
	 */
	base_pos = add64u(base_pos, sector_size);
	total_size = sector_size * sectors;

	/* If the limit is two elements per sector, we cannot test three
	 * elements in a single sector.
	 */
	if (sector_size / element_size == 2 && sectors == 1 && pattern == 2)
		return;

	/* Set up the buffers and I/O vector. We use different buffers for the
	 * elements to minimize the chance that something "accidentally" goes
	 * right, but that means we have to do memory copying to do checksum
	 * computation.
	 */
	fill_rand(sec_ptr[0], sector_size);
	rsum[0] =
		get_sum(sec_ptr[0] + element_size, sector_size - element_size);

	fill_rand(buf_ptr, buf_size);

	switch (pattern) {
	case 0:
		/* First pattern: a small element on the left. */
		iovt[0].iov_addr = (vir_bytes) sec_ptr[0];
		iovt[0].iov_size = element_size;

		iovt[1].iov_addr = (vir_bytes) buf_ptr;
		iovt[1].iov_size = total_size - element_size;
		rsum[1] = get_sum(buf_ptr + iovt[1].iov_size, element_size);

		nr_req = 2;

		break;

	case 1:
		/* Second pattern: a small element on the right. */
		iovt[0].iov_addr = (vir_bytes) buf_ptr;
		iovt[0].iov_size = total_size - element_size;
		rsum[1] = get_sum(buf_ptr + iovt[0].iov_size, element_size);

		iovt[1].iov_addr = (vir_bytes) sec_ptr[0];
		iovt[1].iov_size = element_size;

		nr_req = 2;

		break;

	case 2:
		/* Third pattern: a small element on each side. */
		iovt[0].iov_addr = (vir_bytes) sec_ptr[0];
		iovt[0].iov_size = element_size;

		iovt[1].iov_addr = (vir_bytes) buf_ptr;
		iovt[1].iov_size = total_size - element_size * 2;
		rsum[1] = get_sum(buf_ptr + iovt[1].iov_size,
			element_size * 2);

		fill_rand(sec_ptr[1], sector_size);
		iovt[2].iov_addr = (vir_bytes) sec_ptr[1];
		iovt[2].iov_size = element_size;
		rsum[2] = get_sum(sec_ptr[1] + element_size,
			sector_size - element_size);

		nr_req = 3;

		break;
	}

	/* Perform a read with small elements, and test whether the result is
	 * as expected.
	 */
	memcpy(iov, iovt, sizeof(iov));
	vir_xfer(driver_minor, base_pos, iov, nr_req, FALSE, total_size, &res);

	/* The unused tail of the small element must be left untouched. */
	test_sum(sec_ptr[0] + element_size, sector_size - element_size,
		rsum[0], TRUE, &res);

	/* Reassemble the scattered data into buf_ptr, so that the per-sector
	 * checksums can be verified against ssum below.
	 */
	switch (pattern) {
	case 0:
		test_sum(buf_ptr + iovt[1].iov_size, element_size, rsum[1],
			TRUE, &res);
		memmove(buf_ptr + element_size, buf_ptr, iovt[1].iov_size);
		memcpy(buf_ptr, sec_ptr[0], element_size);
		break;
	case 1:
		test_sum(buf_ptr + iovt[0].iov_size, element_size, rsum[1],
			TRUE, &res);
		memcpy(buf_ptr + iovt[0].iov_size, sec_ptr[0], element_size);
		break;
	case 2:
		test_sum(buf_ptr + iovt[1].iov_size, element_size * 2, rsum[1],
			TRUE, &res);
		test_sum(sec_ptr[1] + element_size, sector_size - element_size,
			rsum[2], TRUE, &res);
		memmove(buf_ptr + element_size, buf_ptr, iovt[1].iov_size);
		memcpy(buf_ptr, sec_ptr[0], element_size);
		memcpy(buf_ptr + element_size + iovt[1].iov_size, sec_ptr[1],
			element_size);
		break;
	}

	for (i = 0; i < sectors; i++)
		test_sum(buf_ptr + sector_size * i, sector_size, ssum[1 + i],
			TRUE, &res);

	got_result(&res, "read with small elements");

	/* In read-only mode, we have nothing more to do. */
	if (!may_write)
		return;

	/* Use the same I/O vector to perform a write with small elements.
	 * This will cause the checksums of the target sectors to change,
	 * so we need to update those for both verification and later usage.
	 */
	for (i = 0; i < sectors; i++)
		ssum[1 + i] =
			fill_rand(buf_ptr + sector_size * i, sector_size);

	/* Scatter the fresh data from buf_ptr into the small elements, the
	 * inverse of the reassembly done after the read above.
	 */
	switch (pattern) {
	case 0:
		memcpy(sec_ptr[0], buf_ptr, element_size);
		memmove(buf_ptr, buf_ptr + element_size, iovt[1].iov_size);
		fill_rand(buf_ptr + iovt[1].iov_size, element_size);
		break;
	case 1:
		memcpy(sec_ptr[0], buf_ptr + iovt[0].iov_size, element_size);
		fill_rand(buf_ptr + iovt[0].iov_size, element_size);
		break;
	case 2:
		memcpy(sec_ptr[0], buf_ptr, element_size);
		memcpy(sec_ptr[1], buf_ptr + element_size + iovt[1].iov_size,
			element_size);
		memmove(buf_ptr, buf_ptr + element_size, iovt[1].iov_size);
		fill_rand(buf_ptr + iovt[1].iov_size, element_size * 2);
		break;
	}

	memcpy(iov, iovt, sizeof(iov));

	vir_xfer(driver_minor, base_pos, iov, nr_req, TRUE, total_size, &res);

	got_result(&res, "write with small elements");

	/* Now perform normal readback verification. */
	fill_rand(buf_ptr, sector_size * 3);

	simple_xfer(driver_minor, base_pos, buf_ptr, sector_size * 3, FALSE,
		sector_size * 3, &res);

	for (i = 0; i < 3; i++)
		test_sum(buf_ptr + sector_size * i, sector_size, ssum[1 + i],
			TRUE, &res);

	got_result(&res, "readback verification");
}
static void unaligned_size(void)
{
	/* Test sector-unaligned sizes in I/O vector elements. The total size
	 * of the request, however, has to add up to the sector size.
	 */
	u8_t *buf_ptr, *sec_ptr[2];
	size_t buf_size;
	u32_t sum = 0L, ssum[5];
	u64_t base_pos;
	result_t res;
	int i;

	test_group("sector-unaligned elements", sector_size != element_size);

	/* We can only do this test if the driver allows small elements. */
	if (sector_size == element_size)
		return;

	/* Crashing on bad user input, terrible! */
	assert(sector_size % element_size == 0);

	/* Establish a baseline by writing and reading back five sectors; or
	 * by reading only, if writing is disabled.
	 */
	buf_size = sector_size * 5;

	base_pos = cvu64(sector_size * 2);

	if ((buf_ptr = alloc_contig(buf_size, 0, NULL)) == NULL)
		panic("unable to allocate memory");

	if ((sec_ptr[0] = alloc_contig(sector_size, 0, NULL)) == NULL)
		panic("unable to allocate memory");

	if ((sec_ptr[1] = alloc_contig(sector_size, 0, NULL)) == NULL)
		panic("unable to allocate memory");

	if (may_write) {
		sum = fill_rand(buf_ptr, buf_size);

		for (i = 0; i < 5; i++)
			ssum[i] = get_sum(buf_ptr + sector_size * i,
				sector_size);

		simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, TRUE,
			buf_size, &res);

		got_result(&res, "write several sectors");
	}

	fill_rand(buf_ptr, buf_size);

	simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, FALSE, buf_size,
		&res);

	if (may_write) {
		/* If we wrote the data ourselves, the read must match it. */
		test_sum(buf_ptr, buf_size, sum, TRUE, &res);
	}
	else {
		/* Otherwise, take whatever is on disk as the baseline. */
		for (i = 0; i < 5; i++)
			ssum[i] = get_sum(buf_ptr + sector_size * i,
				sector_size);
	}

	got_result(&res, "read several sectors");

	/* We do nine subtests. The first three involve only the second sector;
	 * the second three involve the second and third sectors, and the third
	 * three involve all of the middle sectors. Each triplet tests small
	 * elements at the left, at the right, and at both the left and the
	 * right of the area. For each operation, we first do an unaligned
	 * read, and if writing is enabled, an unaligned write and an aligned
	 * read.
	 */
	for (i = 0; i < 9; i++) {
		unaligned_size_io(base_pos, buf_ptr, buf_size, sec_ptr,
			i / 3 + 1, i % 3, ssum);
	}

	/* If writing was enabled, make sure that the first and fifth sector
	 * have remained untouched.
	 */
	if (may_write) {
		fill_rand(buf_ptr, buf_size);

		simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, FALSE,
			buf_size, &res);

		test_sum(buf_ptr, sector_size, ssum[0], TRUE, &res);
		test_sum(buf_ptr + sector_size * 4, sector_size, ssum[4], TRUE,
			&res);

		got_result(&res, "check first and last sectors");
	}

	/* Clean up. */
	free_contig(sec_ptr[1], sector_size);
	free_contig(sec_ptr[0], sector_size);
	free_contig(buf_ptr, buf_size);
}
static void unaligned_pos1(void)
{
	/* Test sector-unaligned positions and total sizes for requests. This
	 * is a read-only test as no driver currently supports sector-unaligned
	 * writes. In this context, the term "lead" means an unwanted first
	 * part of a sector, and "trail" means an unwanted last part of a
	 * sector.
	 */
	u8_t *buf_ptr, *buf2_ptr;
	size_t buf_size, buf2_size, size;
	u32_t sum = 0L, sum2;
	u64_t base_pos;
	result_t res;

	test_group("sector-unaligned positions, part one",
		min_read != sector_size);

	/* We can only do this test if the driver allows small read requests.
	 */
	if (min_read == sector_size)
		return;

	assert(sector_size % min_read == 0);
	assert(min_read % element_size == 0);

	/* Establish a baseline by writing and reading back three sectors; or
	 * by reading only, if writing is disabled.
	 */
	buf_size = buf2_size = sector_size * 3;

	base_pos = cvu64(sector_size * 3);

	if ((buf_ptr = alloc_contig(buf_size, 0, NULL)) == NULL)
		panic("unable to allocate memory");

	if ((buf2_ptr = alloc_contig(buf2_size, 0, NULL)) == NULL)
		panic("unable to allocate memory");

	if (may_write) {
		sum = fill_rand(buf_ptr, buf_size);

		simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, TRUE,
			buf_size, &res);

		got_result(&res, "write several sectors");
	}

	fill_rand(buf_ptr, buf_size);

	simple_xfer(driver_minor, base_pos, buf_ptr, buf_size, FALSE, buf_size,
		&res);

	if (may_write)
		test_sum(buf_ptr, buf_size, sum, TRUE, &res);

	got_result(&res, "read several sectors");

	/* buf_ptr now holds the on-disk baseline; all unaligned reads below
	 * are verified against slices of it.
	 */

	/* Start with a simple test that operates within a single sector,
	 * first using a lead.
	 */
	fill_rand(buf2_ptr, sector_size);
	sum = get_sum(buf2_ptr + min_read, sector_size - min_read);

	simple_xfer(driver_minor, add64u(base_pos, sector_size - min_read),
		buf2_ptr, min_read, FALSE, min_read, &res);

	test_sum(buf2_ptr, min_read, get_sum(buf_ptr + sector_size - min_read,
		min_read), TRUE, &res);
	test_sum(buf2_ptr + min_read, sector_size - min_read, sum, TRUE,
		&res);

	got_result(&res, "single sector read with lead");

	/* Then a trail. */
	fill_rand(buf2_ptr, sector_size);
	sum = get_sum(buf2_ptr, sector_size - min_read);

	simple_xfer(driver_minor, base_pos, buf2_ptr + sector_size - min_read,
		min_read, FALSE, min_read, &res);

	test_sum(buf2_ptr + sector_size - min_read, min_read, get_sum(buf_ptr,
		min_read), TRUE, &res);
	test_sum(buf2_ptr, sector_size - min_read, sum, TRUE, &res);

	got_result(&res, "single sector read with trail");

	/* And then a lead and a trail, unless min_read is half the sector
	 * size, in which case this will be another lead test.
	 */
	fill_rand(buf2_ptr, sector_size);
	sum = get_sum(buf2_ptr, min_read);
	sum2 = get_sum(buf2_ptr + min_read * 2, sector_size - min_read * 2);

	simple_xfer(driver_minor, add64u(base_pos, min_read),
		buf2_ptr + min_read, min_read, FALSE, min_read, &res);

	test_sum(buf2_ptr + min_read, min_read, get_sum(buf_ptr + min_read,
		min_read), TRUE, &res);
	test_sum(buf2_ptr, min_read, sum, TRUE, &res);
	test_sum(buf2_ptr + min_read * 2, sector_size - min_read * 2, sum2,
		TRUE, &res);

	got_result(&res, "single sector read with lead and trail");

	/* Now do the same but with three sectors, and still only one I/O
	 * vector element. First up: lead.
	 */
	size = min_read + sector_size * 2;

	fill_rand(buf2_ptr, buf2_size);
	sum = get_sum(buf2_ptr + size, buf2_size - size);

	simple_xfer(driver_minor, add64u(base_pos, sector_size - min_read),
		buf2_ptr, size, FALSE, size, &res);

	test_sum(buf2_ptr, size, get_sum(buf_ptr + sector_size - min_read,
		size), TRUE, &res);
	test_sum(buf2_ptr + size, buf2_size - size, sum, TRUE, &res);

	got_result(&res, "multisector read with lead");

	/* Then a trail. */
	fill_rand(buf2_ptr, buf2_size);
	sum = get_sum(buf2_ptr + size, buf2_size - size);

	simple_xfer(driver_minor, base_pos, buf2_ptr, size, FALSE, size, &res);

	test_sum(buf2_ptr, size, get_sum(buf_ptr, size), TRUE, &res);
	test_sum(buf2_ptr + size, buf2_size - size, sum, TRUE, &res);

	got_result(&res, "multisector read with trail");

	/* Then lead and trail. Use sector size as transfer unit to throw off
	 * simplistic lead/trail detection.
	 */
	fill_rand(buf2_ptr, buf2_size);
	sum = get_sum(buf2_ptr + sector_size, buf2_size - sector_size);

	simple_xfer(driver_minor, add64u(base_pos, min_read), buf2_ptr,
		sector_size, FALSE, sector_size, &res);

	test_sum(buf2_ptr, sector_size, get_sum(buf_ptr + min_read,
		sector_size), TRUE, &res);
	test_sum(buf2_ptr + sector_size, buf2_size - sector_size, sum, TRUE,
		&res);

	got_result(&res, "multisector read with lead and trail");

	/* Clean up. */
	free_contig(buf2_ptr, buf2_size);
	free_contig(buf_ptr, buf_size);
}
static void unaligned_pos2(void)
{
	/* Test sector-unaligned positions and total sizes for requests, second
	 * part. This one tests the use of multiple I/O vector elements, and
	 * tries to push the limits of the driver by completely filling an I/O
	 * vector and going up to the maximum request size.
	 */
	u8_t *buf_ptr, *buf2_ptr;
	size_t buf_size, buf2_size, max_block;
	u32_t sum = 0L, sum2 = 0L, rsum[NR_IOREQS];
	u64_t base_pos;
	iovec_t iov[NR_IOREQS];
	result_t res;
	int i;

	test_group("sector-unaligned positions, part two",
		min_read != sector_size);

	/* We can only do this test if the driver allows small read requests.
	 */
	if (min_read == sector_size)
		return;

	/* One extra sector beyond max_size, to allow unaligned access to a
	 * max_size-sized area anywhere within the baseline area.
	 */
	buf_size = buf2_size = max_size + sector_size;

	base_pos = cvu64(sector_size * 3);

	if ((buf_ptr = alloc_contig(buf_size, 0, NULL)) == NULL)
		panic("unable to allocate memory");

	if ((buf2_ptr = alloc_contig(buf2_size, 0, NULL)) == NULL)
		panic("unable to allocate memory");

	/* First establish a baseline. We need two requests for this, as the
	 * total area intentionally exceeds the max request size.
	 */
	if (may_write) {
		sum = fill_rand(buf_ptr, max_size);

		simple_xfer(driver_minor, base_pos, buf_ptr, max_size, TRUE,
			max_size, &res);

		got_result(&res, "large baseline write");

		sum2 = fill_rand(buf_ptr + max_size, sector_size);

		simple_xfer(driver_minor, add64u(base_pos, max_size),
			buf_ptr + max_size, sector_size, TRUE, sector_size,
			&res);

		got_result(&res, "small baseline write");
	}

	fill_rand(buf_ptr, buf_size);

	simple_xfer(driver_minor, base_pos, buf_ptr, max_size, FALSE, max_size,
		&res);

	if (may_write)
		test_sum(buf_ptr, max_size, sum, TRUE, &res);

	got_result(&res, "large baseline read");

	simple_xfer(driver_minor, add64u(base_pos, max_size), buf_ptr +
		max_size, sector_size, FALSE, sector_size, &res);

	if (may_write)
		test_sum(buf_ptr + max_size, sector_size, sum2, TRUE, &res);

	got_result(&res, "small baseline read");

	/* buf_ptr now holds the on-disk baseline for the entire area. */

	/* First construct a full vector with minimal sizes. The resulting area
	 * may well fall within a single sector, if min_read is small enough.
	 */
	fill_rand(buf2_ptr, buf2_size);

	/* Elements are spaced one sector apart so that the gaps between them
	 * can be checked for corruption afterwards (rsum).
	 */
	for (i = 0; i < NR_IOREQS; i++) {
		iov[i].iov_addr = (vir_bytes) buf2_ptr + i * sector_size;
		iov[i].iov_size = min_read;

		rsum[i] = get_sum(buf2_ptr + i * sector_size + min_read,
			sector_size - min_read);
	}

	vir_xfer(driver_minor, add64u(base_pos, min_read), iov, NR_IOREQS,
		FALSE, min_read * NR_IOREQS, &res);

	for (i = 0; i < NR_IOREQS; i++) {
		test_sum(buf2_ptr + i * sector_size + min_read,
			sector_size - min_read, rsum[i], TRUE, &res);
		/* Compact the scattered elements for one combined check. */
		memmove(buf2_ptr + i * min_read, buf2_ptr + i * sector_size,
			min_read);
	}

	test_sum(buf2_ptr, min_read * NR_IOREQS, get_sum(buf_ptr + min_read,
		min_read * NR_IOREQS), TRUE, &res);

	got_result(&res, "small fully unaligned filled vector");

	/* Sneak in a maximum sized request with a single I/O vector element,
	 * unaligned. If the driver splits up such large requests into smaller
	 * chunks, this tests whether it does so correctly in the presence of
	 * misalignment.
	 */
	fill_rand(buf2_ptr, buf2_size);

	simple_xfer(driver_minor, add64u(base_pos, min_read), buf2_ptr,
		max_size, FALSE, max_size, &res);

	test_sum(buf2_ptr, max_size, get_sum(buf_ptr + min_read, max_size),
		TRUE, &res);

	got_result(&res, "large fully unaligned single element");

	/* Then try with a vector where each element is as large as possible.
	 * We don't have room to do bounds integrity checking here (we could
	 * make room, but this may be a lot of memory already).
	 */
	/* Compute the largest sector multiple which, when multiplied by
	 * NR_IOREQS, is no more than the maximum transfer size.
	 */
	max_block = max_size / NR_IOREQS;
	max_block -= max_block % sector_size;

	fill_rand(buf2_ptr, buf2_size);

	for (i = 0; i < NR_IOREQS; i++) {
		iov[i].iov_addr = (vir_bytes) buf2_ptr + i * max_block;
		iov[i].iov_size = max_block;
	}

	vir_xfer(driver_minor, add64u(base_pos, min_read), iov, NR_IOREQS,
		FALSE, max_block * NR_IOREQS, &res);

	test_sum(buf2_ptr, max_block * NR_IOREQS, get_sum(buf_ptr + min_read,
		max_block * NR_IOREQS), TRUE, &res);

	got_result(&res, "large fully unaligned filled vector");

	/* Clean up. */
	free_contig(buf2_ptr, buf2_size);
	free_contig(buf_ptr, buf_size);
}
2234 static void sweep_area(u64_t base_pos
)
2236 /* Go over an eight-sector area from left (low address) to right (high
2237 * address), reading and optionally writing in three-sector chunks, and
2238 * advancing one sector at a time.
2242 u32_t sum
= 0L, ssum
[8];
2246 buf_size
= sector_size
* 8;
2248 if ((buf_ptr
= alloc_contig(buf_size
, 0, NULL
)) == NULL
)
2249 panic("unable to allocate memory");
2251 /* First (write to, if allowed, and) read from the entire area in one
2252 * go, so that we know the (initial) contents of the area.
2255 sum
= fill_rand(buf_ptr
, buf_size
);
2257 simple_xfer(driver_minor
, base_pos
, buf_ptr
, buf_size
, TRUE
,
2260 got_result(&res
, "write to full area");
2263 fill_rand(buf_ptr
, buf_size
);
2265 simple_xfer(driver_minor
, base_pos
, buf_ptr
, buf_size
, FALSE
, buf_size
,
2269 test_sum(buf_ptr
, buf_size
, sum
, TRUE
, &res
);
2271 for (i
= 0; i
< 8; i
++)
2272 ssum
[i
] = get_sum(buf_ptr
+ sector_size
* i
, sector_size
);
2274 got_result(&res
, "read from full area");
2276 /* For each of the six three-sector subareas, first read from the
2277 * subarea, check its checksum, and then (if allowed) write new content
2280 for (i
= 0; i
< 6; i
++) {
2281 fill_rand(buf_ptr
, sector_size
* 3);
2283 simple_xfer(driver_minor
, add64u(base_pos
, sector_size
* i
),
2284 buf_ptr
, sector_size
* 3, FALSE
, sector_size
* 3,
2287 for (j
= 0; j
< 3; j
++)
2288 test_sum(buf_ptr
+ sector_size
* j
, sector_size
,
2289 ssum
[i
+ j
], TRUE
, &res
);
2291 got_result(&res
, "read from subarea");
2296 fill_rand(buf_ptr
, sector_size
* 3);
2298 simple_xfer(driver_minor
, add64u(base_pos
, sector_size
* i
),
2299 buf_ptr
, sector_size
* 3, TRUE
, sector_size
* 3, &res
);
2301 for (j
= 0; j
< 3; j
++)
2302 ssum
[i
+ j
] = get_sum(buf_ptr
+ sector_size
* j
,
2305 got_result(&res
, "write to subarea");
2308 /* Finally, if writing was enabled, do one final readback. */
2310 fill_rand(buf_ptr
, buf_size
);
2312 simple_xfer(driver_minor
, base_pos
, buf_ptr
, buf_size
, FALSE
,
2315 for (i
= 0; i
< 8; i
++)
2316 test_sum(buf_ptr
+ sector_size
* i
, sector_size
,
2317 ssum
[i
], TRUE
, &res
);
2319 got_result(&res
, "readback from full area");
2323 free_contig(buf_ptr
, buf_size
);
2326 static void sweep_and_check(u64_t pos
, int check_integ
)
2328 /* Perform an area sweep at the given position. If asked for, get an
2329 * integrity checksum over the beginning of the disk (first writing
2330 * known data into it if that is allowed) before doing the sweep, and
2331 * test the integrity checksum against the disk contents afterwards.
2339 buf_size
= sector_size
* 3;
2341 if ((buf_ptr
= alloc_contig(buf_size
, 0, NULL
)) == NULL
)
2342 panic("unable to allocate memory");
2345 sum
= fill_rand(buf_ptr
, buf_size
);
2347 simple_xfer(driver_minor
, cvu64(0), buf_ptr
, buf_size
,
2348 TRUE
, buf_size
, &res
);
2350 got_result(&res
, "write integrity zone");
2353 fill_rand(buf_ptr
, buf_size
);
2355 simple_xfer(driver_minor
, cvu64(0), buf_ptr
, buf_size
, FALSE
,
2359 test_sum(buf_ptr
, buf_size
, sum
, TRUE
, &res
);
2361 sum
= get_sum(buf_ptr
, buf_size
);
2363 got_result(&res
, "read integrity zone");
2369 fill_rand(buf_ptr
, buf_size
);
2371 simple_xfer(driver_minor
, cvu64(0), buf_ptr
, buf_size
, FALSE
,
2374 test_sum(buf_ptr
, buf_size
, sum
, TRUE
, &res
);
2376 got_result(&res
, "check integrity zone");
2378 free_contig(buf_ptr
, buf_size
);
2382 static void basic_sweep(void)
2384 /* Perform a basic area sweep.
2387 test_group("basic area sweep", TRUE
);
2389 sweep_area(cvu64(sector_size
));
2392 static void high_disk_pos(void)
2394 /* Test 64-bit absolute disk positions. This means that after adding
2395 * partition base to the given position, the driver will be dealing
2396 * with a position above 32 bit. We want to test the transition area
2397 * only; if the entire partition base is above 32 bit, we have already
2398 * effectively performed this test many times over. In other words, for
2399 * this test, the partition must start below 4GB and end above 4GB,
2400 * with at least four sectors on each side.
2404 base_pos
= make64(sector_size
* 4, 1L);
2405 base_pos
= sub64u(base_pos
, rem64u(base_pos
, sector_size
));
2407 /* The partition end must exceed 32 bits. */
2408 if (cmp64(add64(part
.base
, part
.size
), base_pos
) < 0) {
2409 test_group("high disk positions", FALSE
);
2414 base_pos
= sub64u(base_pos
, sector_size
* 8);
2416 /* The partition start must not. */
2417 if (cmp64(base_pos
, part
.base
) < 0) {
2418 test_group("high disk positions", FALSE
);
2422 test_group("high disk positions", TRUE
);
2424 base_pos
= sub64(base_pos
, part
.base
);
2426 sweep_and_check(base_pos
, !cmp64u(part
.base
, 0));
2429 static void high_part_pos(void)
2431 /* Test 64-bit partition-relative disk positions. In other words, use
2432 * within the current partition a position that exceeds a 32-bit value.
2433 * This requires the partition to be more than 4GB in size; we need an
2434 * additional 4 sectors, to be exact.
2438 /* If the partition starts at the beginning of the disk, this test is
2439 * no different from the high disk position test.
2441 if (cmp64u(part
.base
, 0) == 0) {
2442 /* don't complain: the test is simply superfluous now */
2446 base_pos
= make64(sector_size
* 4, 1L);
2447 base_pos
= sub64u(base_pos
, rem64u(base_pos
, sector_size
));
2449 if (cmp64(part
.size
, base_pos
) < 0) {
2450 test_group("high partition positions", FALSE
);
2455 test_group("high partition positions", TRUE
);
2457 base_pos
= sub64u(base_pos
, sector_size
* 8);
2459 sweep_and_check(base_pos
, TRUE
);
2462 static void high_lba_pos1(void)
2464 /* Test 48-bit LBA positions, as opposed to *24-bit*. Drivers that only
2465 * support 48-bit LBA ATA transfers, will treat the lower and upper 24
2466 * bits differently. This is again relative to the disk start, not the
2467 * partition start. For 512-byte sectors, the lowest position exceeding
2468 * 24 bit is at 8GB. As usual, we need four sectors more, and fewer, on
2469 * the other side. The partition that we're operating on, must cover
2474 base_pos
= mul64u(1L << 24, sector_size
);
2476 /* The partition end must exceed the 24-bit sector point. */
2477 if (cmp64(add64(part
.base
, part
.size
), base_pos
) < 0) {
2478 test_group("high LBA positions, part one", FALSE
);
2483 base_pos
= sub64u(base_pos
, sector_size
* 8);
2485 /* The partition start must not. */
2486 if (cmp64(base_pos
, part
.base
) < 0) {
2487 test_group("high LBA positions, part one", FALSE
);
2492 test_group("high LBA positions, part one", TRUE
);
2494 base_pos
= sub64(base_pos
, part
.base
);
2496 sweep_and_check(base_pos
, !cmp64u(part
.base
, 0));
2499 static void high_lba_pos2(void)
2501 /* Test 48-bit LBA positions, as opposed to *28-bit*. That means sector
2502 * numbers in excess of 28-bit values; the old ATA upper limit. The
2503 * same considerations as above apply, except that we now need a 128+GB
2508 base_pos
= mul64u(1L << 28, sector_size
);
2510 /* The partition end must exceed the 28-bit sector point. */
2511 if (cmp64(add64(part
.base
, part
.size
), base_pos
) < 0) {
2512 test_group("high LBA positions, part two", FALSE
);
2517 base_pos
= sub64u(base_pos
, sector_size
* 8);
2519 /* The partition start must not. */
2520 if (cmp64(base_pos
, part
.base
) < 0) {
2521 test_group("high LBA positions, part two", FALSE
);
2526 test_group("high LBA positions, part two", TRUE
);
2528 base_pos
= sub64(base_pos
, part
.base
);
2530 sweep_and_check(base_pos
, !cmp64u(part
.base
, 0));
2533 static void high_pos(void)
/* NOTE(review): every statement line of this function was lost in
 * extraction — only the header and these comments survive. Presumably the
 * body called the high_disk_pos/high_part_pos/high_lba_pos1/high_lba_pos2
 * helpers defined above; restore the exact statements from the upstream
 * source before compiling.
 */
2535 /* Check whether the driver deals well with 64-bit positions and
2536 * 48-bit LBA addresses. We test three cases: disk byte position beyond
2537 * what fits in 32 bit, in-partition byte position beyond what fits in
2538 * 32 bit, and disk sector position beyond what fits in 24 bit. With
2539 * the partition we've been given, we may not be able to test all of
2540 * them (or any, for that matter).
2542 /* In certain rare cases, we might be able to perform integrity
2543 * checking on the area that would be affected if a 32-bit/24-bit
2544 * counter were to wrap. More specifically: we can do that if we can
2545 * access the start of the disk. This is why we should be given the
2546 * entire disk as test area if at all possible.
2560 static void open_primary(void)
2562 /* Open the primary device. This call has its own test group.
2565 test_group("device open", TRUE
);
2567 open_device(driver_minor
);
2570 static void close_primary(void)
2572 /* Close the primary device. This call has its own test group.
2575 test_group("device close", TRUE
);
2577 close_device(driver_minor
);
2579 assert(nr_opened
== 0);
2582 static void do_tests(void)
/* NOTE(review): the body statements of do_tests() were lost in
 * extraction; only the comments below survive. Presumably it invoked the
 * individual test-group functions (open_primary, the transfer tests, the
 * sweep tests, close_primary) in sequence — restore the exact call list
 * from the upstream source before compiling.
 */
2584 /* Perform all the tests.
2595 /* It is assumed that the driver implementation uses shared
2596 * code paths for read and write for the basic checks, so we do
2597 * not repeat those for writes.
2616 static int sef_cb_init_fresh(int UNUSED(type
), sef_init_info_t
*UNUSED(info
))
/* SEF fresh-start initialization callback: parse options, resolve the
 * driver's endpoint, and validate the configured minor device.
 *
 * NOTE(review): the local declarations (at least `r` and `now`, used
 * below) and the function's trailing lines (presumably including a
 * `return OK;`) were lost in extraction; restore them from the upstream
 * source before compiling.
 */
/* Parse the option string (populates driver_label, driver_minor, etc. via
 * optset_table).
 */
2624 optset_parse(optset_table
, env_argv
[1]);
/* A driver label is mandatory. */
2626 if (driver_label
[0] == '\0')
2627 panic("no driver label given");
/* Translate the DS label into the driver's endpoint. */
2629 if (ds_retrieve_label_endpt(driver_label
, &driver_endpt
))
2630 panic("unable to resolve driver label");
/* Minor device numbers are limited to 0..255. */
2632 if (driver_minor
> 255)
2633 panic("invalid or no driver minor given");
/* Obtain the current uptime — presumably used to seed randomness in the
 * lost tail of this function; verify against upstream.
 */
2635 if ((r
= getuptime(&now
)) != OK
)
2636 panic("unable to get uptime: %d", r
);
2643 static void sef_local_startup(void)
/* Register SEF callbacks and start the SEF framework.
 *
 * NOTE(review): the lines after the callback registration (presumably a
 * `sef_startup();` call, per the standard MINIX SEF pattern) were lost in
 * extraction — confirm against the upstream source.
 */
2645 /* Initialize the SEF framework.
2648 sef_setcb_init_fresh(sef_cb_init_fresh
);
2653 int main(int argc
, char **argv
)
2658 env_setargs(argc
, argv
);
2659 sef_local_startup();
2661 printf("BLOCKTEST: driver label '%s' (endpt %d), minor %d\n",
2662 driver_label
, driver_endpt
, driver_minor
);
2666 printf("BLOCKTEST: summary: %d out of %d tests failed "
2667 "across %d group%s; %d driver deaths\n",
2668 failed_tests
, total_tests
, failed_groups
,
2669 failed_groups
== 1 ? "" : "s", driver_deaths
);