[minix.git] / test / kernel / sys_vumap / vumaptest.c

/* Test for sys_vumap() - by D.C. van Moolenbroek */
#include <minix/drivers.h>
#include <minix/ds.h>
#include <sys/mman.h>
#include <assert.h>

#include "com.h"

struct buf {
	int pages;
	int flags;
	vir_bytes addr;
	phys_bytes phys;
};

#define BUF_PREALLOC	0x1	/* if set, immediately allocate the page */
#define BUF_ADJACENT	0x2	/* virtually contiguous with the last buffer */
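
/* Test buffers are described by struct buf above and set up by alloc_bufs()
 * below: a BUF_ADJACENT buffer is placed virtually right after the previous
 * buffer, while any other buffer starts a new, gap-separated virtual block.
 */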

static unsigned int count = 0, failures = 0;

static int success;
static char *fail_file;
static int fail_line;

static int relay;
static endpoint_t endpt;

static int verbose;

static enum {
	GE_NONE,		/* no exception */
	GE_REVOKED,		/* revoked grant */
	GE_INVALID		/* invalid grant */
} grant_exception = GE_NONE;

static int grant_access = 0;

#define expect(r)	expect_f((r), __FILE__, __LINE__)
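
/* expect() records the first failing check of the current test case, along
 * with its file and line; got_result() then counts and reports the test
 * case, printing that location if any check failed.
 */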

static void alloc_buf(struct buf *buf, phys_bytes next)
{
	void *tmp = NULL;
	vir_bytes addr;
	size_t len;
	int r, prealloc, flags;

	/* is_allocated() cannot handle buffers that are not physically
	 * contiguous, and we cannot guarantee physical contiguity if not
	 * preallocating.
	 */
	assert((buf->flags & BUF_PREALLOC) || buf->pages == 1);

	len = buf->pages * PAGE_SIZE;
	prealloc = (buf->flags & BUF_PREALLOC);
	flags = MAP_ANON | (prealloc ? (MAP_CONTIG | MAP_PREALLOC) : 0);

	if (prealloc) {
		/* Allocate a same-sized piece of memory elsewhere, to make it
		 * very unlikely that the actual piece of memory will end up
		 * being physically contiguous with the last piece.
		 */
		tmp = minix_mmap((void *) (buf->addr + len + PAGE_SIZE), len,
			PROT_READ | PROT_WRITE, MAP_ANON | MAP_PREALLOC |
			MAP_CONTIG, -1, 0L);

		if (tmp == MAP_FAILED)
			panic("unable to allocate temporary buffer");
	}

	addr = (vir_bytes) minix_mmap((void *) buf->addr, len,
		PROT_READ | PROT_WRITE, flags, -1, 0L);

	if (addr != buf->addr)
		panic("unable to allocate buffer (2)");

	if (!prealloc)
		return;

	if ((r = minix_munmap(tmp, len)) != OK)
		panic("unable to unmap buffer (%d)", errno);

	if ((r = sys_umap(SELF, VM_D, addr, len, &buf->phys)) < 0)
		panic("unable to get physical address of buffer (%d)", r);

	if (buf->phys != next)
		return;

	if (verbose)
		printf("WARNING: allocating noncontiguous range, second try\n");

	/* We cannot remap this elsewhere, so we run the risk of allocating
	 * the exact same physically contiguous page again. However, now that
	 * we have also unmapped the temporary memory, there is a small chance
	 * we will end up with a different physical page this time. Who knows.
	 */
	minix_munmap((void *) addr, len);

	addr = (vir_bytes) minix_mmap((void *) buf->addr, len,
		PROT_READ | PROT_WRITE, flags, -1, 0L);

	if (addr != buf->addr)
		panic("unable to allocate buffer, second try");

	if ((r = sys_umap(SELF, VM_D, addr, len, &buf->phys)) < 0)
		panic("unable to get physical address of buffer (%d)", r);

	/* Still the same page? Screw it. */
	if (buf->phys == next)
		panic("unable to allocate noncontiguous range");
}

static void alloc_bufs(struct buf *buf, int count)
{
	static vir_bytes base = 0x80000000L;
	phys_bytes next;
	int i;

	/* Allocate the given memory in virtually contiguous blocks whenever
	 * each next buffer is requested to be adjacent. Insert a virtual gap
	 * after each such block. Make sure that each two adjacent buffers in a
	 * block are physically non-contiguous.
	 */
	for (i = 0; i < count; i++) {
		if (i > 0 && (buf[i].flags & BUF_ADJACENT)) {
			next = buf[i-1].phys + buf[i-1].pages * PAGE_SIZE;
		} else {
			base += PAGE_SIZE * 16;
			next = 0L;
		}

		buf[i].addr = base;

		alloc_buf(&buf[i], next);

		base += buf[i].pages * PAGE_SIZE;
	}

#if DEBUG
	for (i = 0; i < count; i++)
		printf("Buf %d: %d pages, flags %x, vir %08x, phys %08x\n", i,
			buf[i].pages, buf[i].flags, buf[i].addr, buf[i].phys);
#endif
}

static void free_bufs(struct buf *buf, int count)
{
	int i, j, r;

	for (i = 0; i < count; i++) {
		for (j = 0; j < buf[i].pages; j++) {
			r = minix_munmap((void *) (buf[i].addr + j * PAGE_SIZE),
				PAGE_SIZE);

			if (r != OK)
				panic("unable to unmap range (%d)", errno);
		}
	}
}

static int is_allocated(vir_bytes addr, size_t bytes, phys_bytes *phys)
{
	int r;

	/* This will have to do for now. Of course, we could use sys_vumap with
	 * VUA_READ for this, but that would defeat the point of one test. It
	 * is still a decent alternative in case sys_umap's behavior ever
	 * changes, though.
	 */
	r = sys_umap(SELF, VM_D, addr, bytes, phys);

	return r == OK;
}

static int is_buf_allocated(struct buf *buf)
{
	return is_allocated(buf->addr, buf->pages * PAGE_SIZE, &buf->phys);
}

static void test_group(char *name)
{
	if (verbose)
		printf("Test group: %s (%s)\n",
			name, relay ? "relay" : "local");
}

static void expect_f(int res, char *file, int line)
{
	if (!res && success) {
		success = FALSE;
		fail_file = file;
		fail_line = line;
	}
}

static void got_result(char *desc)
{
	count++;

	if (!success) {
		failures++;

		printf("#%02d: %-38s\t[FAIL]\n", count, desc);
		printf("- failure at %s:%d\n", fail_file, fail_line);
	} else {
		if (verbose)
			printf("#%02d: %-38s\t[PASS]\n", count, desc);
	}
}
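
/* Relay a vumap request through the "vumaprelay" test driver: wrap each
 * virtual range, the virtual vector itself, and the physical output vector
 * in memory grants, and let the driver perform the actual sys_vumap() call
 * on our behalf.
 */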

static int relay_vumap(struct vumap_vir *vvec, int vcount, size_t offset,
	int access, struct vumap_phys *pvec, int *pcount)
{
	struct vumap_vir gvvec[MAPVEC_NR + 3];
	cp_grant_id_t vgrant, pgrant;
	message m;
	int i, r, gaccess;

	assert(vcount > 0 && vcount <= MAPVEC_NR + 3);
	assert(*pcount > 0 && *pcount <= MAPVEC_NR + 3);

	/* Allow grant access flags to be overridden for testing purposes. */
	if (!(gaccess = grant_access)) {
		if (access & VUA_READ) gaccess |= CPF_READ;
		if (access & VUA_WRITE) gaccess |= CPF_WRITE;
	}

	for (i = 0; i < vcount; i++) {
		gvvec[i].vv_grant = cpf_grant_direct(endpt, vvec[i].vv_addr,
			vvec[i].vv_size, gaccess);
		assert(gvvec[i].vv_grant != GRANT_INVALID);
		gvvec[i].vv_size = vvec[i].vv_size;
	}

	vgrant = cpf_grant_direct(endpt, (vir_bytes) gvvec,
		sizeof(gvvec[0]) * vcount, CPF_READ);
	assert(vgrant != GRANT_INVALID);

	pgrant = cpf_grant_direct(endpt, (vir_bytes) pvec,
		sizeof(pvec[0]) * *pcount, CPF_WRITE);
	assert(pgrant != GRANT_INVALID);

	/* This must be done after allocating all other grants. */
	if (grant_exception != GE_NONE) {
		cpf_revoke(gvvec[vcount - 1].vv_grant);
		if (grant_exception == GE_INVALID)
			gvvec[vcount - 1].vv_grant = GRANT_INVALID;
	}

	m.m_type = VTR_RELAY;
	m.VTR_VGRANT = vgrant;
	m.VTR_VCOUNT = vcount;
	m.VTR_OFFSET = offset;
	m.VTR_ACCESS = access;
	m.VTR_PGRANT = pgrant;
	m.VTR_PCOUNT = *pcount;

	r = sendrec(endpt, &m);

	cpf_revoke(pgrant);
	cpf_revoke(vgrant);

	for (i = 0; i < vcount - !!grant_exception; i++)
		cpf_revoke(gvvec[i].vv_grant);

	*pcount = m.VTR_PCOUNT;

	return (r != OK) ? r : m.m_type;
}
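
/* Wrapper around the vumap call under test: invoke either sys_vumap()
 * directly or relay_vumap() in relay mode, and verify that pvec and pcount
 * are left untouched whenever the call fails.
 */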

static int do_vumap(endpoint_t endpt, struct vumap_vir *vvec, int vcount,
	size_t offset, int access, struct vumap_phys *pvec, int *pcount)
{
	struct vumap_phys pv_backup[MAPVEC_NR + 3];
	int r, pc_backup, pv_test = FALSE;

	/* Make a copy of pvec and pcount for later. */
	pc_backup = *pcount;

	/* We cannot compare pvec contents before and after when relaying,
	 * since the original contents are not transferred.
	 */
	if (!relay && pvec != NULL && pc_backup >= 1 &&
			pc_backup <= MAPVEC_NR + 3) {
		pv_test = TRUE;
		memcpy(pv_backup, pvec, sizeof(*pvec) * pc_backup);
	}

	/* Reset the test result. */
	success = TRUE;

	/* Perform the vumap call, either directly or through a relay. */
	if (relay) {
		assert(endpt == SELF);
		r = relay_vumap(vvec, vcount, offset, access, pvec, pcount);
	} else {
		r = sys_vumap(endpt, vvec, vcount, offset, access, pvec,
			pcount);
	}

	/* Upon failure, pvec and pcount must be unchanged. */
	if (r != OK) {
		expect(pc_backup == *pcount);

		if (pv_test)
			expect(memcmp(pv_backup, pvec,
				sizeof(*pvec) * pc_backup) == 0);
	}

	return r;
}

static void test_basics(void)
{
	struct vumap_vir vvec[2];
	struct vumap_phys pvec[4];
	struct buf buf[4];
	int r, pcount;

	test_group("basics");

	buf[0].pages = 1;
	buf[0].flags = BUF_PREALLOC;
	buf[1].pages = 2;
	buf[1].flags = BUF_PREALLOC;
	buf[2].pages = 1;
	buf[2].flags = BUF_PREALLOC;
	buf[3].pages = 1;
	buf[3].flags = BUF_PREALLOC | BUF_ADJACENT;

	alloc_bufs(buf, 4);

	/* Test single whole page. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys);
	expect(pvec[0].vp_size == vvec[0].vv_size);

	got_result("single whole page");

	/* Test single partial page. */
	vvec[0].vv_addr = buf[0].addr + 123;
	vvec[0].vv_size = PAGE_SIZE - 456;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys + 123);
	expect(pvec[0].vp_size == vvec[0].vv_size);

	got_result("single partial page");

	/* Test multiple contiguous whole pages. */
	vvec[0].vv_addr = buf[1].addr;
	vvec[0].vv_size = PAGE_SIZE * 2;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[1].phys);
	expect(pvec[0].vp_size == vvec[0].vv_size);

	got_result("multiple contiguous whole pages");

	/* Test range in multiple contiguous pages. */
	vvec[0].vv_addr = buf[1].addr + 234;
	vvec[0].vv_size = PAGE_SIZE * 2 - 234;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[1].phys + 234);
	expect(pvec[0].vp_size == vvec[0].vv_size);

	got_result("range in multiple contiguous pages");

	/* Test multiple noncontiguous whole pages. */
	vvec[0].vv_addr = buf[2].addr;
	vvec[0].vv_size = PAGE_SIZE * 2;
	pcount = 3;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 2);
	expect(pvec[0].vp_addr == buf[2].phys);
	expect(pvec[0].vp_size == PAGE_SIZE);
	expect(pvec[1].vp_addr == buf[3].phys);
	expect(pvec[1].vp_size == PAGE_SIZE);

	got_result("multiple noncontiguous whole pages");

	/* Test range in multiple noncontiguous pages. */
	vvec[0].vv_addr = buf[2].addr + 1;
	vvec[0].vv_size = PAGE_SIZE * 2 - 2;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, 0, VUA_WRITE, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 2);
	expect(pvec[0].vp_addr == buf[2].phys + 1);
	expect(pvec[0].vp_size == PAGE_SIZE - 1);
	expect(pvec[1].vp_addr == buf[3].phys);
	expect(pvec[1].vp_size == PAGE_SIZE - 1);

	got_result("range in multiple noncontiguous pages");

	/* Test single-input result truncation. */
	vvec[0].vv_addr = buf[2].addr + PAGE_SIZE / 2;
	vvec[0].vv_size = PAGE_SIZE;
	pvec[1].vp_addr = 0L;
	pvec[1].vp_size = 0;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[2].phys + PAGE_SIZE / 2);
	expect(pvec[0].vp_size == PAGE_SIZE / 2);
	expect(pvec[1].vp_addr == 0L);
	expect(pvec[1].vp_size == 0);

	got_result("single-input result truncation");

	/* Test multiple inputs, contiguous first. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	vvec[1].vv_addr = buf[2].addr + PAGE_SIZE - 1;
	vvec[1].vv_size = 2;
	pcount = 3;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 3);
	expect(pvec[0].vp_addr == buf[0].phys);
	expect(pvec[0].vp_size == PAGE_SIZE);
	expect(pvec[1].vp_addr == buf[2].phys + PAGE_SIZE - 1);
	expect(pvec[1].vp_size == 1);
	expect(pvec[2].vp_addr == buf[3].phys);
	expect(pvec[2].vp_size == 1);

	got_result("multiple inputs, contiguous first");

	/* Test multiple inputs, contiguous last. */
	vvec[0].vv_addr = buf[2].addr + 123;
	vvec[0].vv_size = PAGE_SIZE * 2 - 456;
	vvec[1].vv_addr = buf[1].addr + 234;
	vvec[1].vv_size = PAGE_SIZE * 2 - 345;
	pcount = 4;

	r = do_vumap(SELF, vvec, 2, 0, VUA_WRITE, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 3);
	expect(pvec[0].vp_addr == buf[2].phys + 123);
	expect(pvec[0].vp_size == PAGE_SIZE - 123);
	expect(pvec[1].vp_addr == buf[3].phys);
	expect(pvec[1].vp_size == PAGE_SIZE - (456 - 123));
	expect(pvec[2].vp_addr == buf[1].phys + 234);
	expect(pvec[2].vp_size == vvec[1].vv_size);

	got_result("multiple inputs, contiguous last");

	/* Test multiple-inputs result truncation. */
	vvec[0].vv_addr = buf[2].addr + 2;
	vvec[0].vv_size = PAGE_SIZE * 2 - 3;
	vvec[1].vv_addr = buf[0].addr;
	vvec[1].vv_size = 135;
	pvec[2].vp_addr = 0xDEADBEEFL;
	pvec[2].vp_size = 1234;
	pcount = 2;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 2);
	expect(pvec[0].vp_addr == buf[2].phys + 2);
	expect(pvec[0].vp_size == PAGE_SIZE - 2);
	expect(pvec[1].vp_addr == buf[3].phys);
	expect(pvec[1].vp_size == PAGE_SIZE - 1);
	expect(pvec[2].vp_addr == 0xDEADBEEFL);
	expect(pvec[2].vp_size == 1234);

	got_result("multiple-inputs result truncation");

	free_bufs(buf, 4);
}

static void test_endpt(void)
{
	struct vumap_vir vvec[1];
	struct vumap_phys pvec[1];
	struct buf buf[1];
	int r, pcount;

	test_group("endpoint");

	buf[0].pages = 1;
	buf[0].flags = BUF_PREALLOC;

	alloc_bufs(buf, 1);

	/* Test NONE endpoint. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	pcount = 1;

	r = do_vumap(NONE, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("NONE endpoint");

	/* Test ANY endpoint. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	pcount = 1;

	r = do_vumap(ANY, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("ANY endpoint");

	free_bufs(buf, 1);
}

static void test_vector1(void)
{
	struct vumap_vir vvec[2];
	struct vumap_phys pvec[3];
	struct buf buf[2];
	int r, pcount;

	test_group("vector, part 1");

	buf[0].pages = 2;
	buf[0].flags = BUF_PREALLOC;
	buf[1].pages = 1;
	buf[1].flags = BUF_PREALLOC;

	alloc_bufs(buf, 2);

	/* Test zero virtual memory size. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 2;
	vvec[1].vv_addr = buf[1].addr;
	vvec[1].vv_size = 0;
	pcount = 3;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("zero virtual memory size");

	/* Test excessive virtual memory size. */
	vvec[1].vv_size = (vir_bytes) -1;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT || r == EPERM);

	got_result("excessive virtual memory size");

	/* Test invalid virtual memory. */
	vvec[1].vv_addr = 0L;
	vvec[1].vv_size = PAGE_SIZE;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT);

	got_result("invalid virtual memory");

	/* Test virtual memory overrun. */
	vvec[0].vv_size++;
	vvec[1].vv_addr = buf[1].addr;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT);

	got_result("virtual memory overrun");

	free_bufs(buf, 2);
}

static void test_vector2(void)
{
	struct vumap_vir vvec[2], *vvecp;
	struct vumap_phys pvec[3], *pvecp;
	struct buf buf[2];
	phys_bytes dummy;
	int r, pcount;

	test_group("vector, part 2");

	buf[0].pages = 2;
	buf[0].flags = BUF_PREALLOC;
	buf[1].pages = 1;
	buf[1].flags = BUF_PREALLOC;

	alloc_bufs(buf, 2);

	/* Test zero virtual count. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 2;
	vvec[1].vv_addr = buf[1].addr;
	vvec[1].vv_size = PAGE_SIZE;
	pcount = 3;

	r = do_vumap(SELF, vvec, 0, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("zero virtual count");

	/* Test negative virtual count. */
	r = do_vumap(SELF, vvec, -1, 0, VUA_WRITE, pvec, &pcount);

	expect(r == EINVAL);

	got_result("negative virtual count");

	/* Test zero physical count. */
	pcount = 0;

	r = do_vumap(SELF, vvec, 2, 0, VUA_WRITE, pvec, &pcount);

	expect(r == EINVAL);

	got_result("zero physical count");

	/* Test negative physical count. */
	pcount = -1;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("negative physical count");

	/* Test invalid virtual vector pointer. */
	pcount = 2;

	r = do_vumap(SELF, NULL, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT);

	got_result("invalid virtual vector pointer");

	/* Test unallocated virtual vector. */
	vvecp = (struct vumap_vir *) minix_mmap(NULL, PAGE_SIZE,
		PROT_READ | PROT_WRITE, MAP_ANON, -1, 0L);

	if (vvecp == MAP_FAILED)
		panic("unable to allocate virtual vector");

	r = do_vumap(SELF, vvecp, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT);
	expect(!is_allocated((vir_bytes) vvecp, PAGE_SIZE, &dummy));

	got_result("unallocated virtual vector pointer");

	minix_munmap((void *) vvecp, PAGE_SIZE);

	/* Test invalid physical vector pointer. */
	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, NULL, &pcount);

	expect(r == EFAULT);

	got_result("invalid physical vector pointer");

	/* Test unallocated physical vector. */
	pvecp = (struct vumap_phys *) minix_mmap(NULL, PAGE_SIZE,
		PROT_READ | PROT_WRITE, MAP_ANON, -1, 0L);

	if (pvecp == MAP_FAILED)
		panic("unable to allocate physical vector");

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvecp, &pcount);

	expect(r == OK);
	expect(is_allocated((vir_bytes) pvecp, PAGE_SIZE, &dummy));
	expect(pcount == 2);
	expect(pvecp[0].vp_size == PAGE_SIZE * 2);
	expect(pvecp[0].vp_addr == buf[0].phys);
	expect(pvecp[1].vp_size == PAGE_SIZE);
	expect(pvecp[1].vp_addr == buf[1].phys);

	got_result("unallocated physical vector pointer");

	minix_munmap((void *) pvecp, PAGE_SIZE);

	free_bufs(buf, 2);
}

static void test_grant(void)
{
	struct vumap_vir vvec[2];
	struct vumap_phys pvec[3];
	struct buf buf[2];
	int r, pcount;

	test_group("grant");

	buf[0].pages = 1;
	buf[0].flags = BUF_PREALLOC;
	buf[1].pages = 2;
	buf[1].flags = BUF_PREALLOC;

	alloc_bufs(buf, 2);

	/* Test write-only access on read-only grant. */
	grant_access = CPF_READ;	/* override */

	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, 0, VUA_WRITE, pvec, &pcount);

	expect(r == EPERM);

	got_result("write-only access on read-only grant");

	/* Test read-write access on read-only grant. */
	r = do_vumap(SELF, vvec, 1, 0, VUA_READ | VUA_WRITE, pvec, &pcount);

	expect(r == EPERM);

	got_result("read-write access on read-only grant");

	/* Test read-only access on write-only grant. */
	grant_access = CPF_WRITE;	/* override */

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == EPERM);

	got_result("read-only access on write-only grant");

	/* Test read-write access on write-only grant. */
	r = do_vumap(SELF, vvec, 1, 0, VUA_READ | VUA_WRITE, pvec, &pcount);

	expect(r == EPERM);

	got_result("read-write access on write-only grant");

	/* Test read-only access on read-write grant. */
	grant_access = CPF_READ | CPF_WRITE;	/* override */

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_size == PAGE_SIZE);
	expect(pvec[0].vp_addr == buf[0].phys);

	got_result("read-only access on read-write grant");

	grant_access = 0;	/* reset */

	/* Test invalid grant. */
	grant_exception = GE_INVALID;

	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	vvec[1].vv_addr = buf[1].addr;
	vvec[1].vv_size = PAGE_SIZE * 2;
	pcount = 3;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("invalid grant");

	/* Test revoked grant. */
	grant_exception = GE_REVOKED;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EPERM);

	got_result("revoked grant");

	grant_exception = GE_NONE;

	free_bufs(buf, 2);
}

static void test_offset(void)
{
	struct vumap_vir vvec[2];
	struct vumap_phys pvec[3];
	struct buf buf[4];
	size_t off, off2;
	int r, pcount;

	test_group("offsets");

	buf[0].pages = 1;
	buf[0].flags = BUF_PREALLOC;
	buf[1].pages = 2;
	buf[1].flags = BUF_PREALLOC;
	buf[2].pages = 1;
	buf[2].flags = BUF_PREALLOC;
	buf[3].pages = 1;
	buf[3].flags = BUF_PREALLOC | BUF_ADJACENT;

	alloc_bufs(buf, 4);

	/* Test offset into aligned page. */
	off = 123;
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, off, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys + off);
	expect(pvec[0].vp_size == vvec[0].vv_size - off);

	got_result("offset into aligned page");

	/* Test offset into unaligned page. */
	off2 = 456;
	assert(off + off2 < PAGE_SIZE);
	vvec[0].vv_addr = buf[0].addr + off;
	vvec[0].vv_size = PAGE_SIZE - off;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, off2, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys + off + off2);
	expect(pvec[0].vp_size == vvec[0].vv_size - off2);

	got_result("offset into unaligned page");

	/* Test offset into unaligned page set. */
	off = 1234;
	off2 = 567;
	assert(off + off2 < PAGE_SIZE);
	vvec[0].vv_addr = buf[1].addr + off;
	vvec[0].vv_size = (PAGE_SIZE - off) * 2;
	pcount = 3;

	r = do_vumap(SELF, vvec, 1, off2, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[1].phys + off + off2);
	expect(pvec[0].vp_size == vvec[0].vv_size - off2);

	got_result("offset into contiguous page set");

	/* Test offset into noncontiguous page set. */
	vvec[0].vv_addr = buf[2].addr + off;
	vvec[0].vv_size = (PAGE_SIZE - off) * 2;
	pcount = 3;

	r = do_vumap(SELF, vvec, 1, off2, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 2);
	expect(pvec[0].vp_addr == buf[2].phys + off + off2);
	expect(pvec[0].vp_size == PAGE_SIZE - off - off2);
	expect(pvec[1].vp_addr == buf[3].phys);
	expect(pvec[1].vp_size == PAGE_SIZE - off);

	got_result("offset into noncontiguous page set");

	/* Test offset to last byte. */
	off = PAGE_SIZE - off2 - 1;
	vvec[0].vv_addr = buf[0].addr + off2;
	vvec[0].vv_size = PAGE_SIZE - off2;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, off, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys + off + off2);
	expect(pvec[0].vp_size == 1);

	got_result("offset to last byte");

	/* Test offset at range end. */
	off = 234;
	vvec[0].vv_addr = buf[1].addr + off;
	vvec[0].vv_size = PAGE_SIZE - off * 2;
	vvec[1].vv_addr = vvec[0].vv_addr + vvec[0].vv_size;
	vvec[1].vv_size = off;

	r = do_vumap(SELF, vvec, 2, vvec[0].vv_size, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("offset at range end");

	/* Test offset beyond range end. */
	vvec[0].vv_addr = buf[1].addr;
	vvec[0].vv_size = PAGE_SIZE;
	vvec[1].vv_addr = buf[1].addr + PAGE_SIZE;
	vvec[1].vv_size = PAGE_SIZE;

	r = do_vumap(SELF, vvec, 2, PAGE_SIZE + off, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("offset beyond range end");

	/* Test negative offset. */
	vvec[0].vv_addr = buf[1].addr + off + off2;
	vvec[0].vv_size = PAGE_SIZE;

	r = do_vumap(SELF, vvec, 1, (size_t) -1, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("negative offset");

	free_bufs(buf, 4);
}

static void test_access(void)
{
	struct vumap_vir vvec[3];
	struct vumap_phys pvec[4], *pvecp;
	struct buf buf[7];
	int i, r, pcount, pindex;

	test_group("access");

	buf[0].pages = 1;
	buf[0].flags = 0;
	buf[1].pages = 1;
	buf[1].flags = BUF_PREALLOC | BUF_ADJACENT;
	buf[2].pages = 1;
	buf[2].flags = BUF_ADJACENT;

	alloc_bufs(buf, 3);

	/* Test no access flags. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 3;
	pcount = 4;

	r = do_vumap(SELF, vvec, 1, 0, 0, pvec, &pcount);

	expect(r == EINVAL);
	expect(!is_buf_allocated(&buf[0]));
	expect(is_buf_allocated(&buf[1]));
	expect(!is_buf_allocated(&buf[2]));

	got_result("no access flags");

	/* Test read-only access. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 3;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT);
	expect(!is_buf_allocated(&buf[0]));
	expect(is_buf_allocated(&buf[1]));
	expect(!is_buf_allocated(&buf[2]));

	got_result("read-only access");

	/* Test read-write access. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 3;
	pcount = 4;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ | VUA_WRITE, pvec, &pcount);

	expect(r == EFAULT);
	expect(!is_buf_allocated(&buf[0]));
	expect(is_buf_allocated(&buf[1]));
	expect(!is_buf_allocated(&buf[2]));

	got_result("read-write access");

	/* Test write-only access. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 3;
	pcount = 4;

	r = do_vumap(SELF, vvec, 1, 0, VUA_WRITE, pvec, &pcount);

	expect(r == OK);
	/* We don't control the physical addresses of the faulted-in pages, so
	 * they may or may not end up being contiguous with their neighbours.
	 */
	expect(pcount >= 1 && pcount <= 3);
	expect(is_buf_allocated(&buf[0]));
	expect(is_buf_allocated(&buf[1]));
	expect(is_buf_allocated(&buf[2]));
	expect(pvec[0].vp_addr == buf[0].phys);
	switch (pcount) {
	case 1:
		expect(pvec[0].vp_size == PAGE_SIZE * 3);
		break;
	case 2:
		expect(pvec[0].vp_size + pvec[1].vp_size == PAGE_SIZE * 3);
		if (pvec[0].vp_size > PAGE_SIZE)
			expect(pvec[1].vp_addr == buf[2].phys);
		else
			expect(pvec[1].vp_addr == buf[1].phys);
		break;
	case 3:
		expect(pvec[0].vp_size == PAGE_SIZE);
		expect(pvec[1].vp_addr == buf[1].phys);
		expect(pvec[1].vp_size == PAGE_SIZE);
		expect(pvec[2].vp_addr == buf[2].phys);
		expect(pvec[2].vp_size == PAGE_SIZE);
		break;
	}

	got_result("write-only access");

	free_bufs(buf, 3);

	/* Test page faulting. */
	buf[0].pages = 1;
	buf[0].flags = 0;
	buf[1].pages = 1;
	buf[1].flags = BUF_PREALLOC | BUF_ADJACENT;
	buf[2].pages = 1;
	buf[2].flags = 0;
	buf[3].pages = 2;
	buf[3].flags = BUF_PREALLOC;
	buf[4].pages = 1;
	buf[4].flags = BUF_ADJACENT;
	buf[5].pages = 1;
	buf[5].flags = BUF_ADJACENT;
	buf[6].pages = 1;
	buf[6].flags = 0;

	alloc_bufs(buf, 7);

	vvec[0].vv_addr = buf[0].addr + PAGE_SIZE - 1;
	vvec[0].vv_size = PAGE_SIZE - 1;
	vvec[1].vv_addr = buf[2].addr;
	vvec[1].vv_size = PAGE_SIZE;
	vvec[2].vv_addr = buf[3].addr + 123;
	vvec[2].vv_size = PAGE_SIZE * 4 - 456;
	pvecp = (struct vumap_phys *) buf[6].addr;
	pcount = 7;
	assert(sizeof(struct vumap_phys) * pcount <= PAGE_SIZE);

	r = do_vumap(SELF, vvec, 3, 0, VUA_WRITE, pvecp, &pcount);

	expect(r == OK);
	/* Same story but more possibilities. I hope I got this right. */
	expect(pcount >= 3 && pcount <= 6);
	for (i = 0; i < 7; i++)
		expect(is_buf_allocated(&buf[i]));
	expect(pvecp[0].vp_addr == buf[0].phys);
	if (pvecp[0].vp_size == 1) {
		expect(pvecp[1].vp_addr == buf[1].phys);
		expect(pvecp[1].vp_size == PAGE_SIZE - 2);
		pindex = 2;
	} else {
		expect(pvecp[0].vp_size == PAGE_SIZE - 1);
		pindex = 1;
	}
	expect(pvecp[pindex].vp_addr == buf[2].phys);
	expect(pvecp[pindex].vp_size == PAGE_SIZE);
	pindex++;
	expect(pvecp[pindex].vp_addr == buf[3].phys + 123);
	switch (pcount - pindex) {
	case 1:
		expect(pvecp[pindex].vp_size == PAGE_SIZE * 4 - 456);
		break;
	case 2:
		if (pvecp[pindex].vp_size > PAGE_SIZE * 2 - 123) {
			expect(pvecp[pindex].vp_size == PAGE_SIZE * 3 - 123);
			expect(pvecp[pindex + 1].vp_addr == buf[5].phys);
			expect(pvecp[pindex + 1].vp_size ==
				PAGE_SIZE - (456 - 123));
		} else {
			expect(pvecp[pindex].vp_size == PAGE_SIZE * 2 - 123);
			expect(pvecp[pindex + 1].vp_addr == buf[4].phys);
			expect(pvecp[pindex + 1].vp_size ==
				PAGE_SIZE * 2 - (456 - 123));
		}
		break;
	case 3:
		expect(pvecp[pindex].vp_size == PAGE_SIZE * 2 - 123);
		expect(pvecp[pindex + 1].vp_addr == buf[4].phys);
		expect(pvecp[pindex + 1].vp_size == PAGE_SIZE);
		expect(pvecp[pindex + 2].vp_addr == buf[5].phys);
		expect(pvecp[pindex + 2].vp_size == PAGE_SIZE - (456 - 123));
		break;
	default:
		expect(0);
	}

	got_result("page faulting");

	free_bufs(buf, 7);

	/* MISSING: tests to see whether a request with VUA_WRITE or
	 * (VUA_READ|VUA_WRITE) correctly gets an EFAULT for a read-only page.
	 * As of writing, support for such protection is missing from the
	 * system altogether.
	 */
}
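
/* Helper for the physical limit tests below: perform a vumap call on the
 * given vectors and verify that exactly MAPVEC_NR single-page entries,
 * matching the given buffers, are returned.
 */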

static void phys_limit(struct vumap_vir *vvec, int vcount,
	struct vumap_phys *pvec, int pcount, struct buf *buf, char *desc)
{
	int i, r;

	r = do_vumap(SELF, vvec, vcount, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR);
	for (i = 0; i < MAPVEC_NR; i++) {
		expect(pvec[i].vp_addr == buf[i].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result(desc);
}

static void test_limits(void)
{
	struct vumap_vir vvec[MAPVEC_NR + 3];
	struct vumap_phys pvec[MAPVEC_NR + 3];
	struct buf buf[MAPVEC_NR + 9];
	int i, r, vcount, pcount, nr_bufs;

	test_group("limits");

	/* Test large contiguous range. */
	buf[0].pages = MAPVEC_NR + 2;
	buf[0].flags = BUF_PREALLOC;

	alloc_bufs(buf, 1);

	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = (MAPVEC_NR + 2) * PAGE_SIZE;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys);
	expect(pvec[0].vp_size == vvec[0].vv_size);

	got_result("large contiguous range");

	free_bufs(buf, 1);

	/* I'd like to test MAPVEC_NR contiguous ranges of MAPVEC_NR pages
	 * each, but chances are we don't have that much contiguous memory
	 * available at all. In fact, the previous test may already fail
	 * because of this.
	 */

	for (i = 0; i < MAPVEC_NR + 2; i++) {
		buf[i].pages = 1;
		buf[i].flags = BUF_PREALLOC;
	}
	buf[i].pages = 1;
	buf[i].flags = BUF_PREALLOC | BUF_ADJACENT;

	alloc_bufs(buf, MAPVEC_NR + 3);

	/* Test virtual limit, one below. */
	for (i = 0; i < MAPVEC_NR + 2; i++) {
		vvec[i].vv_addr = buf[i].addr;
		vvec[i].vv_size = PAGE_SIZE;
	}
	vvec[i - 1].vv_size += PAGE_SIZE;

	pcount = MAPVEC_NR + 3;

	r = do_vumap(SELF, vvec, MAPVEC_NR - 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR - 1);
	for (i = 0; i < MAPVEC_NR - 1; i++) {
		expect(pvec[i].vp_addr == buf[i].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("virtual limit, one below");

	/* Test virtual limit, exact match. */
	pcount = MAPVEC_NR + 3;

	r = do_vumap(SELF, vvec, MAPVEC_NR, 0, VUA_WRITE, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR);
	for (i = 0; i < MAPVEC_NR; i++) {
		expect(pvec[i].vp_addr == buf[i].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("virtual limit, exact match");

	/* Test virtual limit, one above. */
	pcount = MAPVEC_NR + 3;

	r = do_vumap(SELF, vvec, MAPVEC_NR + 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR);
	for (i = 0; i < MAPVEC_NR; i++) {
		expect(pvec[i].vp_addr == buf[i].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("virtual limit, one above");

	/* Test virtual limit, two above. */
	pcount = MAPVEC_NR + 3;

	r = do_vumap(SELF, vvec, MAPVEC_NR + 2, 0, VUA_WRITE, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR);
	for (i = 0; i < MAPVEC_NR; i++) {
		expect(pvec[i].vp_addr == buf[i].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("virtual limit, two above");

	/* Test physical limit, one below, aligned. */
	pcount = MAPVEC_NR - 1;

	r = do_vumap(SELF, vvec + 2, MAPVEC_NR, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR - 1);
	for (i = 0; i < MAPVEC_NR - 1; i++) {
		expect(pvec[i].vp_addr == buf[i + 2].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("physical limit, one below, aligned");

	/* Test physical limit, one below, unaligned. */
	pcount = MAPVEC_NR - 1;

	r = do_vumap(SELF, vvec + 3, MAPVEC_NR, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR - 1);
	for (i = 0; i < MAPVEC_NR - 1; i++) {
		expect(pvec[i].vp_addr == buf[i + 3].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("physical limit, one below, unaligned");

	free_bufs(buf, MAPVEC_NR + 3);

	nr_bufs = sizeof(buf) / sizeof(buf[0]);

	/* This ends up looking in our virtual address space as follows:
	 * [P] [P] [P] [PPP] [PPP] ...(MAPVEC_NR x [PPP])... [PPP]
	 * ..where P is a page, and the blocks are virtually contiguous.
	 */
	for (i = 0; i < nr_bufs; i += 3) {
		buf[i].pages = 1;
		buf[i].flags = BUF_PREALLOC;
		buf[i + 1].pages = 1;
		buf[i + 1].flags =
			BUF_PREALLOC | ((i >= 3) ? BUF_ADJACENT : 0);
		buf[i + 2].pages = 1;
		buf[i + 2].flags =
			BUF_PREALLOC | ((i >= 3) ? BUF_ADJACENT : 0);
	}

	alloc_bufs(buf, nr_bufs);

	for (i = 0; i < 3; i++) {
		vvec[i].vv_addr = buf[i].addr;
		vvec[i].vv_size = PAGE_SIZE;
	}
	for ( ; i < nr_bufs / 3 + 1; i++) {
		vvec[i].vv_addr = buf[(i - 2) * 3].addr;
		vvec[i].vv_size = PAGE_SIZE * 3;
	}
	vcount = i;

	/* Of each of the following sets of three tests, one will be aligned
	 * (that is, the last pvec entry will be for the last page in a vvec
	 * entry) and two will be unaligned.
	 */

	/* Test physical limit, exact match. */
	phys_limit(vvec, vcount, pvec, MAPVEC_NR, buf,
		"physical limit, exact match, try 1");
	phys_limit(vvec + 1, vcount - 1, pvec, MAPVEC_NR, buf + 1,
		"physical limit, exact match, try 2");
	phys_limit(vvec + 2, vcount - 2, pvec, MAPVEC_NR, buf + 2,
		"physical limit, exact match, try 3");

	/* Test physical limit, one above. */
	phys_limit(vvec, vcount, pvec, MAPVEC_NR + 1, buf,
		"physical limit, one above, try 1");
	phys_limit(vvec + 1, vcount - 1, pvec, MAPVEC_NR + 1, buf + 1,
		"physical limit, one above, try 2");
	phys_limit(vvec + 2, vcount - 2, pvec, MAPVEC_NR + 1, buf + 2,
		"physical limit, one above, try 3");

	/* Test physical limit, two above. */
	phys_limit(vvec, vcount, pvec, MAPVEC_NR + 2, buf,
		"physical limit, two above, try 1");
	phys_limit(vvec + 1, vcount - 1, pvec, MAPVEC_NR + 2, buf + 1,
		"physical limit, two above, try 2");
	phys_limit(vvec + 2, vcount - 2, pvec, MAPVEC_NR + 2, buf + 2,
		"physical limit, two above, try 3");

	free_bufs(buf, nr_bufs);
}
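
/* Run all test groups, either performing the vumap calls directly (local)
 * or passing them through the vumaprelay driver (relay). A few groups only
 * apply to one of the two modes.
 */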

static void do_tests(int use_relay)
{
	relay = use_relay;

	test_basics();

	if (!relay) test_endpt();	/* local only */

	test_vector1();

	if (!relay) test_vector2();	/* local only */

	if (relay) test_grant();	/* remote only */

	test_offset();

	test_access();

	test_limits();
}

static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *UNUSED(info))
{
	int r;

	verbose = (env_argc > 1 && !strcmp(env_argv[1], "-v"));

	if (verbose)
		printf("Starting sys_vumap test set\n");

	do_tests(FALSE /*use_relay*/);

	if ((r = ds_retrieve_label_endpt("vumaprelay", &endpt)) != OK)
		panic("unable to obtain endpoint for 'vumaprelay' (%d)", r);

	do_tests(TRUE /*use_relay*/);

	if (verbose)
		printf("Completed sys_vumap test set, %u/%u tests failed\n",
			failures, count);

	/* The returned code will determine the outcome of the RS call, and
	 * thus the entire test. The actual error code does not matter.
	 */
	return (failures) ? EINVAL : OK;
}
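
/* SEF startup: register the fresh-initialization callback above, which runs
 * the entire test set, and hand control to SEF.
 */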

static void sef_local_startup(void)
{
	sef_setcb_init_fresh(sef_cb_init_fresh);

	sef_startup();
}

int main(int argc, char **argv)
{
	env_setargs(argc, argv);

	sef_local_startup();

	return 0;
}