/* Test 74 - mmap functionality & regression test.
 *
 * This test exercises some basic functionality of mmap(), and also some
 * cases that are quite complex for the system to handle.
 *
 * Memory pages are generally made available on demand. Memory copying
 * is done by the kernel. As the kernel may encounter pagefaults in
 * legitimate memory ranges (e.g. pages that aren't mapped; pages that
 * are mapped RO as they are COW), it cooperates with VM to make the
 * mappings and let the copy succeed transparently.
 *
 * With file-mapped ranges this can result in a deadlock, if care is
 * not taken, as the copy might be requested by VFS or an FS. This test
 * triggers as many of these states as possible to ensure they are
 * successful or (where appropriate) fail gracefully, i.e. without
 * deadlock.
 *
 * To do this, system calls are done with source or target buffers with
 * missing or readonly mappings, both anonymous and file-mapped. The
 * cache is flushed before mmap() so that we know the mappings should
 * not be present at mmap() time. Then e.g. a read() or write() is
 * executed with that buffer as target. This makes an FS copy to or
 * from a missing range that the same FS is needed to map in first.
 * VFS detects this, requests VM to map in the pages, which VM does with
 * the help of another VFS thread and the FS, and then VFS re-issues the
 * request to the FS. (An illustrative sketch of this cache-flush +
 * mmap pattern follows the includes below.)
 *
 * Another case is VFS itself doing such a copy. This is actually
 * unusual, as filenames are already faulted in by the requesting process
 * in libc by strlen(); select() allows such a case, however, so this
 * is tested too. We are satisfied if the call completes.
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/ioc_memory.h>
#include <sys/param.h>
#include <sys/select.h>	/* select(), fd_set, struct timeval */
#include <sys/stat.h>	/* struct stat, fstat() */
#include <stdio.h>
#include <assert.h>
#include <errno.h>	/* EFAULT checks below */
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <dirent.h>

#include "common.h"
#include "testcache.h"
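
/*
 * Illustrative sketch only -- this helper is never called by the test.
 * It condenses the pattern described in the comment at the top of this
 * file: flush the FS cache, mmap() part of a file so its pages are not
 * yet present, then hand the mapping to a system call, so the FS has to
 * fault the range in while it is itself servicing that call. The helper
 * name is made up, and it assumes 'fd' refers to a regular file, open for
 * read and write, holding at least one page of data; the real cases are
 * exercised below by test_memory_types_vs_operations() and
 * basic_regression().
 */
static int sketch_fault_during_syscall(int fd)
{
	char *p;

	/* Drop any cached pages so the mapping below starts out not present. */
	if(fcntl(fd, F_FLUSH_FS_CACHE) < 0) return -1;

	/* Map the first page of the file; nothing is faulted in yet. */
	if((p = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE | MAP_FILE,
		fd, 0)) == MAP_FAILED) return -1;

	/* write() with the mapping as its source buffer: the FS doing the
	 * copy is also the FS needed to map the source pages in first.
	 */
	if(write(fd, p, PAGE_SIZE) != PAGE_SIZE) {
		munmap(p, PAGE_SIZE);
		return -1;
	}

	return munmap(p, PAGE_SIZE);
}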

int max_error = 0;	/* make all e()'s fatal */

int
dowriteblock(int b, int blocksize, u32_t seed, char *data)
{
	u64_t offset;
	int fd;

	get_fd_offset(b, blocksize, &offset, &fd);

	if(pwrite(fd, data, blocksize, offset) < blocksize) {
		perror("pwrite");
		return -1;
	}

	return blocksize;
}

int
readblock(int b, int blocksize, u32_t seed, char *data)
{
	u64_t offset;
	int fd;
	char *mmapdata;
	int pread_first = random() % 2;

	get_fd_offset(b, blocksize, &offset, &fd);

	if(pread_first) {
		if(pread(fd, data, blocksize, offset) < blocksize) {
			perror("pread");
			return -1;
		}
	}

	if((mmapdata = mmap(NULL, blocksize, PROT_READ, MAP_PRIVATE | MAP_FILE,
		fd, offset)) == MAP_FAILED) {
		perror("mmap");
		return -1;
	}

	if(!pread_first) {
		if(pread(fd, data, blocksize, offset) < blocksize) {
			perror("pread");
			return -1;
		}
	}

	if(memcmp(mmapdata, data, blocksize)) {
		fprintf(stderr, "readblock: mmap, pread mismatch\n");
		return -1;
	}

	if(munmap(mmapdata, blocksize) < 0) {
		perror("munmap");
		return -1;
	}

	return blocksize;
}

void testend(void) { }

static void do_read(void *buf, int fd, int writable)
{
	ssize_t ret;
	size_t n = PAGE_SIZE;
	struct stat sb;
	if(fstat(fd, &sb) < 0) e(1);
	if(S_ISDIR(sb.st_mode)) return;
	ret = read(fd, buf, n);

	/* if the buffer is writable, it should succeed */
	if(writable) { if(ret != n) e(3); return; }

	/* if the buffer is not writable, it should fail with EFAULT */
	if(ret >= 0) e(4);
	if(errno != EFAULT) e(5);
}

static void do_write(void *buf, int fd, int writable)
{
	size_t n = PAGE_SIZE;
	struct stat sb;
	if(fstat(fd, &sb) < 0) e(1);
	if(S_ISDIR(sb.st_mode)) return;
	if(write(fd, buf, n) != n) e(3);
}

static void do_stat(void *buf, int fd, int writable)
{
	int r;
	r = fstat(fd, (struct stat *) buf);

	/* should succeed if buf is writable */
	if(writable) { if(r < 0) e(3); return; }

	/* should fail with EFAULT if buf is not */
	if(r >= 0) e(4);
	if(errno != EFAULT) e(5);
}

static void do_getdents(void *buf, int fd, int writable)
{
	struct stat sb;
	int r;
	if(fstat(fd, &sb) < 0) e(1);
	if(!S_ISDIR(sb.st_mode)) return;	/* OK */
	r = getdents(fd, buf, PAGE_SIZE);
	if(writable) { if(r < 0) e(3); return; }

	/* should fail with EFAULT if buf is not */
	if(r >= 0) e(4);
	if(errno != EFAULT) e(5);
}

static void do_readlink1(void *buf, int fd, int writable)
{
	char target[200];
	/* the system call just has to fail gracefully */
	readlink(buf, target, sizeof(target));
}

#define NODENAME	"a"
#define TARGETNAME	"b"

static void do_readlink2(void *buf, int fd, int writable)
{
	ssize_t rl;
	unlink(NODENAME);
	if(symlink(TARGETNAME, NODENAME) < 0) e(1);
	/* sizeof(buf) is just the pointer size, but that is enough room for
	 * the one-byte link target.
	 */
	rl=readlink(NODENAME, buf, sizeof(buf));

	/* if buf is writable, it should succeed, with a certain result */
	if(writable) {
		if(rl < 0) e(2);
		((char *) buf)[rl] = '\0';
		if(strcmp(buf, TARGETNAME)) {
			fprintf(stderr, "readlink: expected %s, got %s\n",
				TARGETNAME, (char *) buf);
			e(3);
		}
		return;
	}

	/* if buf is not writable, it should fail with EFAULT */
	if(rl >= 0) e(4);

	if(errno != EFAULT) e(5);
}

static void do_symlink1(void *buf, int fd, int writable)
{
	int r;
	/* the system call just has to fail gracefully */
	r = symlink(buf, NODENAME);
}

static void do_symlink2(void *buf, int fd, int writable)
{
	int r;
	/* the system call just has to fail gracefully */
	r = symlink(NODENAME, buf);
}

static void do_open(void *buf, int fd, int writable)
{
	int r;
	/* the system call just has to fail gracefully */
	r = open(buf, O_RDONLY);
	if(r >= 0) close(r);
}

static void do_select1(void *buf, int fd, int writable)
{
	int r;
	struct timeval timeout = { 0, 200000 };	/* 0.2 sec */
	/* the system call just has to fail gracefully */
	r = select(1, buf, NULL, NULL, &timeout);
}

static void do_select2(void *buf, int fd, int writable)
{
	int r;
	struct timeval timeout = { 0, 200000 };	/* 0.2 sec */
	/* the system call just has to fail gracefully */
	r = select(1, NULL, buf, NULL, &timeout);
}

static void do_select3(void *buf, int fd, int writable)
{
	int r;
	struct timeval timeout = { 0, 200000 };	/* 0.2 sec */
	/* the system call just has to fail gracefully */
	r = select(1, NULL, NULL, buf, &timeout);
}

static void fillfile(int fd, int size)
{
	char *buf = malloc(size);

	if(size < 1 || size % PAGE_SIZE || !buf) { e(1); }
	memset(buf, 'A', size);
	buf[50] = '\0';	/* so it can be used as a filename arg */
	buf[size-1] = '\0';
	if(write(fd, buf, size) != size) { e(2); }
	if(lseek(fd, 0, SEEK_SET) < 0) { e(3); }
	free(buf);
}

static void make_buffers(int size,
	int *ret_fd_rw, int *ret_fd_ro,
	void **filebuf_rw, void **filebuf_ro, void **anonbuf)
{
	char fn_rw[] = "testfile_rw.XXXXXX", fn_ro[] = "testfile_ro.XXXXXX";
	*ret_fd_rw = mkstemp(fn_rw);
	*ret_fd_ro = mkstemp(fn_ro);

	if(size < 1 || size % PAGE_SIZE) { e(2); }
	if(*ret_fd_rw < 0) { e(1); }
	if(*ret_fd_ro < 0) { e(1); }
	fillfile(*ret_fd_rw, size);
	fillfile(*ret_fd_ro, size);
	if(fcntl(*ret_fd_rw, F_FLUSH_FS_CACHE) < 0) { e(4); }
	if(fcntl(*ret_fd_ro, F_FLUSH_FS_CACHE) < 0) { e(4); }

	if((*filebuf_rw = mmap(0, size, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_FILE, *ret_fd_rw, 0)) == MAP_FAILED) {
		e(5);
		quit();
	}

	if((*filebuf_ro = mmap(0, size, PROT_READ,
		MAP_PRIVATE | MAP_FILE, *ret_fd_ro, 0)) == MAP_FAILED) {
		e(5);
		quit();
	}

	if((*anonbuf = mmap(0, size, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANON, -1, 0)) == MAP_FAILED) {
		e(6);
		quit();
	}

	if(unlink(fn_rw) < 0) { e(12); }
	if(unlink(fn_ro) < 0) { e(12); }
}

static void forget_buffers(void *buf1, void *buf2, void *buf3, int fd1, int fd2, int size)
{
	if(munmap(buf1, size) < 0) { e(1); }
	if(munmap(buf2, size) < 0) { e(2); }
	if(munmap(buf3, size) < 0) { e(2); }
	if(fcntl(fd1, F_FLUSH_FS_CACHE) < 0) { e(3); }
	if(fcntl(fd2, F_FLUSH_FS_CACHE) < 0) { e(3); }
	if(close(fd1) < 0) { e(4); }
	if(close(fd2) < 0) { e(4); }
}

#define NEXPERIMENTS 12
struct {
	void (*do_operation)(void * buf, int fd, int writable);
} experiments[NEXPERIMENTS] = {
	{ do_read },
	{ do_write },
	{ do_stat },
	{ do_getdents },
	{ do_readlink1 },
	{ do_readlink2 },
	{ do_symlink1 },
	{ do_symlink2 },
	{ do_open },
	{ do_select1 },
	{ do_select2 },
	{ do_select3 },
};

void test_memory_types_vs_operations(void)
{
#define NFDS 4
#define BUFSIZE (10 * PAGE_SIZE)
	int exp, fds[NFDS];
	int f = 0, size = BUFSIZE;

	/* open some test fd's; 0644 is the creation mode for the O_CREAT case */
#define OPEN(fn, mode) { assert(f >= 0 && f < NFDS); \
	fds[f] = open(fn, mode, 0644); if(fds[f] < 0) { e(2); } f++; }
	OPEN("regular", O_RDWR | O_CREAT);
	OPEN(".", O_RDONLY);
	OPEN("/dev/ram", O_RDWR);
	OPEN("/dev/zero", O_RDWR);

	/* make sure the regular file has plenty of size to play with */
	fillfile(fds[0], BUFSIZE);

	/* and the ramdisk too */
	if(ioctl(fds[2], MIOCRAMSIZE, &size) < 0) { e(3); }

	for(exp = 0; exp < NEXPERIMENTS; exp++) {
		for(f = 0; f < NFDS; f++) {
			void *anonmem, *filemem_rw, *filemem_ro;
			int buffd_rw, buffd_ro;

			make_buffers(BUFSIZE, &buffd_rw, &buffd_ro,
				&filemem_rw, &filemem_ro, &anonmem);

			if(lseek(fds[f], 0, SEEK_SET) != 0) { e(10); }
			experiments[exp].do_operation(anonmem, fds[f], 1);

			if(lseek(fds[f], 0, SEEK_SET) != 0) { e(11); }
			experiments[exp].do_operation(filemem_rw, fds[f], 1);

			if(lseek(fds[f], 0, SEEK_SET) != 0) { e(12); }
			experiments[exp].do_operation(filemem_ro, fds[f], 0);

			forget_buffers(filemem_rw, filemem_ro, anonmem, buffd_rw, buffd_ro, BUFSIZE);
		}
	}
}

void basic_regression(void)
{
	int fd, fd1, fd2;
	ssize_t rb, wr;
	char buf[PAGE_SIZE*2];
	void *block, *block1, *block2;
#define BLOCKSIZE (PAGE_SIZE*10)
	block = mmap(0, BLOCKSIZE, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANON, -1, 0);

	if(block == MAP_FAILED) { e(1); }

	memset(block, 0, BLOCKSIZE);

	/* shrink from bottom */
	munmap(block, PAGE_SIZE);

	/* Next test: use the write() system call to access a block of
	 * unavailable file-mapped memory.
	 *
	 * This is a thorny corner case to make succeed transparently
	 * because
	 * (1) it is a filesystem that is doing the memory access
	 *     (the copy from the read-only file-mapped range in this
	 *     process to the FS), but it is also the FS needed to
	 *     satisfy that range if it isn't in the cache; and
	 * (2) there are two separate memory regions involved, requiring
	 *     separate VFS requests from VM to properly satisfy, which
	 *     in turn requires some complex state to be kept.
	 *
	 * (A condensed, stand-alone sketch of this scenario follows this
	 * function.)
	 */

	fd1 = open("../testsh1", O_RDONLY);
	fd2 = open("../testsh2", O_RDONLY);
	if(fd1 < 0 || fd2 < 0) { e(2); }

	/* just check that we can't mmap() a file writable */
	if(mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FILE, fd1, 0) != MAP_FAILED) {
		e(1);
	}

	/* check that we can mmap() a file MAP_SHARED readonly */
	if(mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED | MAP_FILE, fd1, 0) == MAP_FAILED) {
		e(1);
	}

	/* clear cache of files before mmap so pages won't be present already */
	if(fcntl(fd1, F_FLUSH_FS_CACHE) < 0) { e(1); }
	if(fcntl(fd2, F_FLUSH_FS_CACHE) < 0) { e(1); }

#define LOCATION1 ((void *) 0x90000000)
#define LOCATION2 ((void *) ((char *) LOCATION1 + PAGE_SIZE))
	block1 = mmap(LOCATION1, PAGE_SIZE, PROT_READ, MAP_PRIVATE | MAP_FILE, fd1, 0);
	if(block1 == MAP_FAILED) { e(4); }
	if(block1 != LOCATION1) { e(5); }

	block2 = mmap(LOCATION2, PAGE_SIZE, PROT_READ, MAP_PRIVATE | MAP_FILE, fd2, 0);
	if(block2 == MAP_FAILED) { e(10); }
	if(block2 != LOCATION2) { e(11); }

	unlink("testfile");
	fd = open("testfile", O_CREAT | O_RDWR, 0644);
	if(fd < 0) { e(15); }

	/* write() using the mmap()ed memory as buffer */

	if((wr=write(fd, LOCATION1, sizeof(buf))) != sizeof(buf)) {
		fprintf(stderr, "wrote %zd bytes instead of %zu\n",
			wr, sizeof(buf));
		e(20);
		quit();
	}

	/* verify written contents */

	if((rb=pread(fd, buf, sizeof(buf), 0)) != sizeof(buf)) {
		if(rb < 0) perror("pread");
		fprintf(stderr, "wrote %zd bytes\n", wr);
		fprintf(stderr, "read %zd bytes instead of %zu\n",
			rb, sizeof(buf));
		e(21);
		quit();
	}

	if(memcmp(buf, LOCATION1, sizeof(buf))) {
		e(22);
		quit();
	}

	close(fd);
	close(fd1);
	close(fd2);
}
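
/*
 * Illustrative sketch only -- never called. It isolates the thorny case
 * from basic_regression() above: a single write() whose source buffer
 * spans two read-only, file-mapped pages backed by two different files,
 * neither of which is in the FS cache, so the FS performing the copy
 * depends on mappings it must itself help fault in. The helper name and
 * the fixed address are made up; fd1 and fd2 are assumed to be readable
 * files of at least one page, and outfd a writable scratch file. As in
 * the test above, the mmap() address hint is assumed to be honored.
 */
static int sketch_write_from_two_file_mappings(int fd1, int fd2, int outfd)
{
	char *base = (char *) 0x90000000;	/* assumed-free address range */
	void *p1, *p2;

	/* Make sure neither file's pages are in the FS cache. */
	if(fcntl(fd1, F_FLUSH_FS_CACHE) < 0) return -1;
	if(fcntl(fd2, F_FLUSH_FS_CACHE) < 0) return -1;

	/* Map one read-only page from each file, adjacent in memory. */
	p1 = mmap(base, PAGE_SIZE, PROT_READ, MAP_PRIVATE | MAP_FILE, fd1, 0);
	if(p1 != (void *) base) return -1;
	p2 = mmap(base + PAGE_SIZE, PAGE_SIZE, PROT_READ, MAP_PRIVATE | MAP_FILE,
		fd2, 0);
	if(p2 != (void *) (base + PAGE_SIZE)) {
		munmap(p1, PAGE_SIZE);
		return -1;
	}

	/* One write() crossing both mappings: VM has to issue two separate
	 * requests before the FS can complete its copy.
	 */
	if(write(outfd, base, 2*PAGE_SIZE) != 2*PAGE_SIZE) {
		munmap(base, 2*PAGE_SIZE);
		return -1;
	}

	return munmap(base, 2*PAGE_SIZE);
}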

int
main(int argc, char *argv[])
{
	int iter = 2;

	start(74);

	basic_regression();

	test_memory_types_vs_operations();

	makefiles(MAXFILES);

	cachequiet(!bigflag);
	if(bigflag) iter = 3;

	/* Try various combinations of working set sizes
	 * and block sizes in order to specifically
	 * target the primary cache, then primary+secondary
	 * cache, then primary+secondary cache+secondary
	 * cache eviction.
	 */

	if(dotest(PAGE_SIZE,    100, iter)) e(5);
	if(dotest(PAGE_SIZE*2,  100, iter)) e(2);
	if(dotest(PAGE_SIZE*3,  100, iter)) e(3);
	if(dotest(PAGE_SIZE,  20000, iter)) e(5);

	if(bigflag) {
		u32_t totalmem, freemem, cachedmem;
		if(dotest(PAGE_SIZE, 150000, iter)) e(5);
		getmem(&totalmem, &freemem, &cachedmem);
		if(dotest(PAGE_SIZE, totalmem*1.5, iter)) e(6);
	}

	quit();

	return 0;
}