/* Test for end-of-file during block device I/O - by D.C. van Moolenbroek */
/* This test needs to be run as root; it sets up and uses a VND instance. */
/*
 * The test should work with all root file system block sizes, but only tests
 * certain corner cases if the root FS block size is twice the page size.
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/statvfs.h>
#include <sys/ioctl.h>
#include <minix/partition.h>

#include "common.h"	/* MINIX3 test framework; provides e(), among others */

#define VNCONFIG "/usr/sbin/vnconfig"

#define SECTOR_SIZE 512	/* this should be the sector size of VND */

#define ITERATIONS 3	/* number of full test rounds */

/* The supported end-of-file access attempts. */
enum {
	BEFORE_EOF,
	UPTO_EOF,
	ACROSS_EOF,
	ONEPAST_EOF,
	FROM_EOF,
	BEYOND_EOF
};
static int need_cleanup = 0;

static int dev_fd;
static size_t dev_size;
static char *dev_buf;
static char *dev_ref;

static size_t block_size;
static size_t page_size;
static int test_peek;

static char *mmap_ptr = NULL;
static size_t mmap_size;

static int pipe_fd[2];
/*
 * Fill the given buffer with random contents.
 */
static void
fill_buf(char * buf, size_t size)
{

	while (size--)
		*buf++ = lrand48() & 0xff;
}
/*
 * Place the elements of the source array in the destination array in random
 * order. There are probably better ways to do this, but it is morning, and I
 * haven't had coffee yet, so go away.
 */
static void
scramble(int * dst, const int * src, int count)
{
	int i, j, k;

	for (i = 0; i < count; i++)
		dst[i] = i;

	for (i = count - 1; i >= 0; i--) {
		j = lrand48() % (i + 1);

		k = dst[j];
		dst[j] = dst[i];
		dst[i] = src[k];
	}
}
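/*
 * The loop above is, in effect, a Fisher-Yates shuffle: dst initially holds
 * the identity permutation of indices, and each iteration draws one random
 * remaining index and fixes the corresponding source element in place, so
 * every source element ends up in dst exactly once.
 */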
/*
 * Perform I/O using read(2) and check the returned results against the
 * expected result and the image reference data.
 */
static void
io_read(size_t pos, size_t len, size_t expected)
{
	ssize_t bytes;

	assert(len > 0 && len <= dev_size);
	assert(expected <= len);

	if (lseek(dev_fd, (off_t)pos, SEEK_SET) != pos) e(0);

	memset(dev_buf, 0, len);

	if ((bytes = read(dev_fd, dev_buf, len)) < 0) e(0);

	if (bytes != expected) e(0);

	if (memcmp(&dev_ref[pos], dev_buf, bytes)) e(0);
}
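/*
 * For example, with a hypothetical dev_size of 4096, io_read(4000, 200, 96)
 * expects the read to be truncated at the device end: read(2) must return
 * 96, and those 96 bytes must match dev_ref[4000] through dev_ref[4095].
 */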
/*
 * Perform I/O using write(2) and check the returned result against the
 * expected result. Update the image reference data as appropriate.
 */
static void
io_write(size_t pos, size_t len, size_t expected)
{
	ssize_t bytes;

	assert(len > 0 && len <= dev_size);
	assert(expected <= len);

	if (lseek(dev_fd, (off_t)pos, SEEK_SET) != pos) e(0);

	fill_buf(dev_buf, len);

	if ((bytes = write(dev_fd, dev_buf, len)) < 0) e(0);

	if (bytes != expected) e(0);

	if (bytes > 0) {
		assert(pos + bytes <= dev_size);

		memcpy(&dev_ref[pos], dev_buf, bytes);
	}
}
/*
 * Test if reading from the given pointer succeeds or not, and return the
 * result.
 */
static int
is_readable(char * ptr)
{
	ssize_t r;
	char byte;

	/*
	 * If we access the pointer directly, we will get a fatal signal.
	 * Thus, for that to work we would need a child process, making the
	 * whole test slow and noisy. Let a service try the operation instead.
	 */
	r = write(pipe_fd[1], ptr, 1);

	if (r == 1) {
		/* Don't fill up the pipe. */
		if (read(pipe_fd[0], &byte, 1) != 1) e(0);

		return 1;
	} else if (r != -1 || errno != EFAULT)
		e(0);

	return 0;
}
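/*
 * The pipe trick above: write(2) must copy one byte from the probed address
 * on our behalf. If that page is mapped in, the copy succeeds and we drain
 * the byte again; if not, the failure comes back to us as an EFAULT error
 * code rather than as a fatal SIGSEGV/SIGBUS.
 */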
/*
 * Perform I/O using mmap(2) and check the returned results against the
 * expected result and the image reference data. Ensure that bytes beyond the
 * device end are either zero (on the remainder of the last page) or
 * inaccessible on pages entirely beyond the device end.
 */
static void
io_peek(size_t pos, size_t len, size_t expected)
{
	size_t n, delta, mapped_size;
	char *ptr;

	assert(test_peek);

	delta = pos % page_size;

	pos -= delta;
	len += delta;

	len = roundup(len, page_size);

	/* Don't bother with the given expected value. Recompute it. */
	if (pos < dev_size)
		expected = MIN(dev_size - pos, len);
	else
		expected = 0;

	mapped_size = roundup(dev_size, page_size);

	assert(!(len % page_size));

	ptr = mmap(NULL, len, PROT_READ, MAP_PRIVATE | MAP_FILE, dev_fd,
	    (off_t)pos);

	/*
	 * As of writing, VM allows memory mapping at any offset and for any
	 * length. At least for block devices, VM should probably be changed
	 * to throw ENXIO for any pages beyond the file end, which in turn
	 * renders all the SIGBUS tests below obsolete.
	 */
	if (ptr == MAP_FAILED) {
		if (pos + len <= mapped_size) e(0);
		if (errno != ENXIO) e(0);

		return;
	}

	mmap_ptr = ptr;
	mmap_size = len;

	/*
	 * Any page that contains any valid part of the mapped device should be
	 * readable and have correct contents for that part. If the last valid
	 * page extends beyond the mapped device, its remainder should be zero.
	 */
	if (pos < dev_size) {
		/* The valid part should have the expected device contents. */
		if (memcmp(&dev_ref[pos], ptr, expected)) e(0);

		/* The remainder, if any, should be zero. */
		for (n = expected; n % page_size; n++)
			if (ptr[n] != 0) e(0);
	}

	/*
	 * Any page entirely beyond EOF should not be mapped in. In order to
	 * ensure that is_readable() works, also test pages that are mapped in.
	 */
	for (n = pos; n < pos + len; n += page_size)
		if (is_readable(&ptr[n - pos]) != (n < mapped_size)) e(0);

	munmap(ptr, len);

	mmap_ptr = NULL;
}
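/*
 * Note that the mapping is created with PROT_READ and MAP_PRIVATE only:
 * peeking never dirties the block cache, which is why io_peek(), unlike
 * io_write(), leaves the dev_ref reference data untouched.
 */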
/*
 * Perform one of the supported end-of-file access attempts using one I/O
 * primitive.
 */
static void
do_one_io(int where, void (* io_proc)(size_t, size_t, size_t))
{
	size_t start, bytes;

	switch (where) {
	case BEFORE_EOF:
		/* The range ends one byte short of EOF. */
		bytes = lrand48() % (dev_size - 1) + 1;

		io_proc(dev_size - bytes - 1, bytes, bytes);

		break;

	case UPTO_EOF:
		/* The range ends exactly at EOF. */
		bytes = lrand48() % dev_size + 1;

		io_proc(dev_size - bytes, bytes, bytes);

		break;

	case ACROSS_EOF:
		/* At least one byte on either side of EOF. */
		start = lrand48() % (dev_size - 1) + 1;
		bytes = dev_size - start + 1;
		assert(start < dev_size && start + bytes > dev_size);
		bytes += lrand48() % (dev_size - bytes + 1);

		io_proc(start, bytes, dev_size - start);

		break;

	case ONEPAST_EOF:
		/* The range ends exactly one byte beyond EOF. */
		bytes = lrand48() % (dev_size - 1) + 1;

		io_proc(dev_size - bytes + 1, bytes, bytes - 1);

		break;

	case FROM_EOF:
		/* The range starts exactly at EOF. */
		bytes = lrand48() % dev_size + 1;

		io_proc(dev_size, bytes, 0);

		break;

	case BEYOND_EOF:
		/* The range starts at least one byte beyond EOF. */
		start = dev_size + lrand48() % dev_size + 1;
		bytes = lrand48() % dev_size + 1;

		io_proc(start, bytes, 0);

		break;

	default:
		e(0);
	}
}
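/*
 * As a worked (hypothetical) example, take dev_size = 4096. ONEPAST_EOF with
 * bytes = 100 yields io_proc(3997, 100, 99): the attempt covers positions
 * 3997 through 4096, of which only the 99 bytes up to position 4095 exist,
 * so read(2) and write(2) must both return 99.
 */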
/*
 * Perform I/O operations, testing all the supported end-of-file access
 * attempts in a random order so as to detect possible problems with caching.
 */
static void
do_io(void (* io_proc)(size_t, size_t, size_t))
{
	static const int list[] = { BEFORE_EOF, UPTO_EOF, ACROSS_EOF,
	    ONEPAST_EOF, FROM_EOF, BEYOND_EOF };
	static const int count = sizeof(list) / sizeof(list[0]);
	int i, where[count];

	scramble(where, list, count);

	for (i = 0; i < count; i++)
		do_one_io(where[i], io_proc);
}
/*
 * Set up an image file of the given size, assign it to a VND, and open the
 * resulting block device. The size is size_t because we keep a reference copy
 * of its entire contents in memory.
 */
static void
setup_image(size_t size)
{
	struct part_geom part;
	size_t off;
	ssize_t bytes;
	int fd, status;

	dev_size = size;
	if ((dev_buf = malloc(dev_size)) == NULL) e(0);
	if ((dev_ref = malloc(dev_size)) == NULL) e(0);

	if ((fd = open("image", O_CREAT | O_TRUNC | O_RDWR, 0644)) < 0) e(0);

	fill_buf(dev_ref, dev_size);

	for (off = 0; off < dev_size; off += bytes) {
		bytes = write(fd, &dev_ref[off], dev_size - off);

		if (bytes <= 0) e(0);
	}

	close(fd);

	status = system(VNCONFIG " vnd0 image 2>/dev/null");
	if (!WIFEXITED(status)) e(0);
	if (WEXITSTATUS(status) != 0) {
		printf("skipped\n"); /* most likely cause: vnd0 is in use */

		exit(0);
	}

	need_cleanup = 1;

	if ((dev_fd = open("/dev/vnd0", O_RDWR)) < 0) e(0);

	if (ioctl(dev_fd, DIOCGETP, &part) < 0) e(0);

	if (part.size != dev_size) e(0);
}
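/*
 * For reference: "vnconfig vnd0 image" attaches the regular file ./image to
 * the vnd0 vnode disk driver, after which /dev/vnd0 acts as a block device
 * backed by that file; "vnconfig -u vnd0" (see cleanup_device() below)
 * detaches it again.
 */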
/*
 * Clean up the VND we set up previously. This function is also called in case
 * of an unexpected exit.
 */
static void
cleanup_device(void)
{
	int status;

	if (!need_cleanup)
		return;

	if (mmap_ptr != NULL) {
		munmap(mmap_ptr, mmap_size);

		mmap_ptr = NULL;
	}

	if (dev_fd >= 0)
		close(dev_fd);

	status = system(VNCONFIG " -u vnd0 2>/dev/null");
	if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) e(0);

	need_cleanup = 0;
}
/*
 * Signal handler for exceptions.
 */
static void
got_signal(int __unused sig)
{

	cleanup_device();

	exit(1);
}
/*
 * Clean up the VND and image file we set up previously.
 */
static void
cleanup_image(void)
{
	size_t off;
	ssize_t bytes;
	int fd;

	cleanup_device();

	if ((fd = open("image", O_RDONLY, 0644)) < 0) e(0);

	for (off = 0; off < dev_size; off += bytes) {
		bytes = read(fd, &dev_buf[off], dev_size - off);

		if (bytes <= 0) e(0);
	}

	close(fd);

	unlink("image");

	/* Have all changes been written back to the device? */
	if (memcmp(dev_buf, dev_ref, dev_size)) e(0);

	free(dev_buf);
	free(dev_ref);
}
/*
 * Run the full test for a block device with the given size.
 */
static void
do_test(size_t size)
{
	int i;

	/*
	 * Using the three I/O primitives (read, write, peek), we run four
	 * sequences, mainly to test the effects of blocks being cached or not.
	 * We set up a new image for each sequence, because -if everything goes
	 * right- closing the device file also clears all cached blocks for it,
	 * in both the root file system's cache and the VM cache. Note that we
	 * currently do not even attempt to push the blocks out of the root FS'
	 * cache in order to test retrieval from the VM cache, since this would
	 * involve doing a LOT of extra I/O.
	 */
	for (i = 0; i < 4; i++) {
		setup_image(size);

		switch (i) {
		case 0:
			/* Sequence one: read only. */
			do_io(io_read);

			break;

		case 1:
			/* Sequence two: write, then read back. */
			do_io(io_write);

			do_io(io_read);

			break;

		case 2:
			/* Sequence three: peek only. */
			if (test_peek) do_io(io_peek);

			break;

		case 3:
			/* Sequence four: write, then peek. */
			do_io(io_write);

			if (test_peek) do_io(io_peek);

			break;
		}

		close(dev_fd);

		cleanup_image();
	}
}
/*
 * Test program for end-of-file conditions during block device I/O.
 */
int
main(void)
{
	static const unsigned int blocks[] = { 1, 4, 3, 5, 2 };
	struct statvfs buf;
	unsigned int i, j;

	signal(SIGINT, got_signal);
	signal(SIGABRT, got_signal);
	signal(SIGSEGV, got_signal);
	signal(SIGBUS, got_signal);
	atexit(cleanup_device);

	if (pipe(pipe_fd) != 0) e(0);

	/*
	 * Get the system page size, and align all memory mapping offsets and
	 * sizes accordingly.
	 */
	page_size = sysconf(_SC_PAGESIZE);

	/*
	 * Get the root file system block size. In the current MINIX3 system
	 * architecture, the root file system's block size determines the
	 * transfer granularity for I/O on unmounted block devices. If this
	 * block size is not a multiple of the page size, we are (currently!)
	 * not expecting memory-mapped block devices to work.
	 */
	if (statvfs("/", &buf) < 0) e(0);

	block_size = buf.f_bsize;

	test_peek = !(block_size % page_size);

	for (i = 0; i < ITERATIONS; i++) {
		/*
		 * The 'blocks' array is scrambled so as to detect any blocks
		 * left in the VM cache (or not) across runs, just in case.
		 */
		for (j = 0; j < sizeof(blocks) / sizeof(blocks[0]); j++) {
			do_test(blocks[j] * block_size + SECTOR_SIZE);

			do_test(blocks[j] * block_size);

			do_test(blocks[j] * block_size - SECTOR_SIZE);
		}
	}

	return 0;
}