/* Test for sys_vumap() - by D.C. van Moolenbroek */
#include <minix/drivers.h>
#include <minix/ds.h>
#include <sys/mman.h>
#include <assert.h>

#include "com.h"	/* VTR_* request protocol shared with the relay */

struct buf {
	int pages;
	int flags;
	vir_bytes addr;
	phys_bytes phys;
};
#define BUF_PREALLOC	0x1	/* if set, immediately allocate the page */
#define BUF_ADJACENT	0x2	/* virtually contiguous with the last buffer */

static unsigned int count = 0, failures = 0;

static int success;
static char *fail_file;
static int fail_line;

static int relay;
static endpoint_t endpt;

static int verbose;

static enum {
	GE_NONE,	/* no exception */
	GE_REVOKED,	/* revoked grant */
	GE_INVALID	/* invalid grant */
} grant_exception = GE_NONE;

static int grant_access = 0;

#define expect(r) expect_f((r), __FILE__, __LINE__)
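
/* Every check below goes through expect(), which passes the current file
 * name and line number to expect_f(); the first failing check of each test
 * case is recorded there and reported by got_result().
 */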
static void alloc_buf(struct buf *buf, phys_bytes next)
{
	void *tmp;
	vir_bytes addr;
	size_t len;
	int r, prealloc, flags;

	/* is_allocated() cannot handle buffers that are not physically
	 * contiguous, and we cannot guarantee physical contiguity if not
	 * preallocating the buffer.
	 */
	assert((buf->flags & BUF_PREALLOC) || buf->pages == 1);

	len = buf->pages * PAGE_SIZE;
	prealloc = (buf->flags & BUF_PREALLOC);
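
	/* A preallocated buffer is mapped in as a single physically
	 * contiguous, prefaulted range; without BUF_PREALLOC, the page is
	 * only mapped in lazily, upon first access.
	 */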
	flags = MAP_ANON | (prealloc ? (MAP_CONTIG | MAP_PREALLOC) : 0);

	if (prealloc) {
		/* Allocate a same-sized piece of memory elsewhere, to make it
		 * very unlikely that the actual piece of memory will end up
		 * being physically contiguous with the last piece.
		 */
		tmp = minix_mmap((void *) (buf->addr + len + PAGE_SIZE), len,
			PROT_READ | PROT_WRITE, MAP_ANON | MAP_PREALLOC |
			MAP_CONTIG, -1, 0L);

		if (tmp == MAP_FAILED)
			panic("unable to allocate temporary buffer");
	}

	addr = (vir_bytes) minix_mmap((void *) buf->addr, len,
		PROT_READ | PROT_WRITE, flags, -1, 0L);

	if (addr != buf->addr)
		panic("unable to allocate buffer (2)");

	if (!prealloc)
		return;

	if ((r = minix_munmap(tmp, len)) != OK)
		panic("unable to unmap buffer (%d)", errno);
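
	/* sys_umap() resolves a virtual range in our own data space (VM_D)
	 * to a physical address, which lets us check where the buffer
	 * actually ended up.
	 */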
	if ((r = sys_umap(SELF, VM_D, addr, len, &buf->phys)) < 0)
		panic("unable to get physical address of buffer (%d)", r);

	if (buf->phys != next)
		return;

	if (verbose)
		printf("WARNING: alloc noncontiguous range, second try\n");

	/* Can't remap this to elsewhere, so we run the risk of allocating the
	 * exact same physically contiguous page again. However, now that we've
	 * unmapped the temporary memory also, there's a small chance we'll end
	 * up with a different physical page this time. Who knows.
	 */
	minix_munmap((void *) addr, len);

	addr = (vir_bytes) minix_mmap((void *) buf->addr, len,
		PROT_READ | PROT_WRITE, flags, -1, 0L);

	if (addr != buf->addr)
		panic("unable to allocate buffer, second try");

	if ((r = sys_umap(SELF, VM_D, addr, len, &buf->phys)) < 0)
		panic("unable to get physical address of buffer (%d)", r);

	/* Still the same page? Screw it. */
	if (buf->phys == next)
		panic("unable to allocate noncontiguous range");
}

static void alloc_bufs(struct buf *buf, int count)
{
	static vir_bytes base = 0x80000000L;
	phys_bytes next;
	int i;

	/* Allocate the given memory in virtually contiguous blocks whenever
	 * each next buffer is requested to be adjacent. Insert a virtual gap
	 * after each such block. Make sure that each two adjacent buffers in a
	 * block are physically non-contiguous.
	 */
	for (i = 0; i < count; i++) {
		if (i > 0 && (buf[i].flags & BUF_ADJACENT)) {
			next = buf[i-1].phys + buf[i-1].pages * PAGE_SIZE;
		} else {
			base += PAGE_SIZE * 16;
			next = 0;
		}

		buf[i].addr = base;

		alloc_buf(&buf[i], next);

		base += buf[i].pages * PAGE_SIZE;
	}

	if (verbose) {
		for (i = 0; i < count; i++)
			printf("Buf %d: %d pages, flags %x, vir %08x, phys %08x\n",
				i, buf[i].pages, buf[i].flags, buf[i].addr,
				buf[i].phys);
	}
}

static void free_bufs(struct buf *buf, int count)
{
	int i, j, r;

	for (i = 0; i < count; i++) {
		for (j = 0; j < buf[i].pages; j++) {
			r = minix_munmap((void *) (buf[i].addr + j * PAGE_SIZE),
				PAGE_SIZE);

			if (r != OK)
				panic("unable to unmap range (%d)", errno);
		}
	}
}

static int is_allocated(vir_bytes addr, size_t bytes, phys_bytes *phys)
{
	int r;

	/* This will have to do for now. Of course, we could use sys_vumap with
	 * VUA_READ for this, but that would defeat the point of one test. It
	 * is still a decent alternative in case sys_umap's behavior ever
	 * changes.
	 */
	r = sys_umap(SELF, VM_D, addr, bytes, phys);

	return r == OK;
}

static int is_buf_allocated(struct buf *buf)
{
	return is_allocated(buf->addr, buf->pages * PAGE_SIZE, &buf->phys);
}

static void test_group(char *name)
{
	if (verbose)
		printf("Test group: %s (%s)\n",
			name, relay ? "relay" : "local");
}

static void expect_f(int res, char *file, int line)
{
	if (!res && success) {
		success = FALSE;
		fail_file = file;
		fail_line = line;
	}
}

static void got_result(char *desc)
{
	count++;

	if (!success) {
		failures++;

		printf("#%02d: %-38s\t[FAIL]\n", count, desc);
		printf("- failure at %s:%d\n", fail_file, fail_line);
	} else {
		if (verbose)
			printf("#%02d: %-38s\t[PASS]\n", count, desc);
	}
}
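
/* Have the relay service perform the sys_vumap() call on our behalf:
 * convert each virtual address in vvec into a memory grant for the relay,
 * grant it access to the converted vector and to the result vector, and
 * send it a VTR_RELAY request. The grant_access and grant_exception
 * variables allow the grant tests to inject bad grants here.
 */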
static int relay_vumap(struct vumap_vir *vvec, int vcount, size_t offset,
	int access, struct vumap_phys *pvec, int *pcount)
{
	struct vumap_vir gvvec[MAPVEC_NR + 3];
	cp_grant_id_t vgrant, pgrant;
	message m;
	int i, r, gaccess;

	assert(vcount > 0 && vcount <= MAPVEC_NR + 3);
	assert(*pcount > 0 && *pcount <= MAPVEC_NR + 3);

	/* Allow grant access flags to be overridden for testing purposes. */
	if (!(gaccess = grant_access)) {
		if (access & VUA_READ) gaccess |= CPF_READ;
		if (access & VUA_WRITE) gaccess |= CPF_WRITE;
	}

	for (i = 0; i < vcount; i++) {
		gvvec[i].vv_grant = cpf_grant_direct(endpt, vvec[i].vv_addr,
			vvec[i].vv_size, gaccess);
		assert(gvvec[i].vv_grant != GRANT_INVALID);
		gvvec[i].vv_size = vvec[i].vv_size;
	}

	vgrant = cpf_grant_direct(endpt, (vir_bytes) gvvec,
		sizeof(gvvec[0]) * vcount, CPF_READ);
	assert(vgrant != GRANT_INVALID);

	pgrant = cpf_grant_direct(endpt, (vir_bytes) pvec,
		sizeof(pvec[0]) * *pcount, CPF_WRITE);
	assert(pgrant != GRANT_INVALID);

	/* This must be done after allocating all other grants. */
	if (grant_exception != GE_NONE) {
		cpf_revoke(gvvec[vcount - 1].vv_grant);

		if (grant_exception == GE_INVALID)
			gvvec[vcount - 1].vv_grant = GRANT_INVALID;
	}

	m.m_type = VTR_RELAY;
	m.VTR_VGRANT = vgrant;
	m.VTR_VCOUNT = vcount;
	m.VTR_OFFSET = offset;
	m.VTR_ACCESS = access;
	m.VTR_PGRANT = pgrant;
	m.VTR_PCOUNT = *pcount;

	r = sendrec(endpt, &m);

	cpf_revoke(pgrant);
	cpf_revoke(vgrant);

	for (i = 0; i < vcount - !!grant_exception; i++)
		cpf_revoke(gvvec[i].vv_grant);

	*pcount = m.VTR_PCOUNT;

	return (r != OK) ? r : m.m_type;
}

static int do_vumap(endpoint_t endpt, struct vumap_vir *vvec, int vcount,
	size_t offset, int access, struct vumap_phys *pvec, int *pcount)
{
	struct vumap_phys pv_backup[MAPVEC_NR + 3];
	int r, pc_backup, pv_test = FALSE;

	/* Make a copy of pvec and pcount for later. */
	pc_backup = *pcount;

	/* We cannot compare pvec contents before and after when relaying,
	 * since the original contents are not transferred.
	 */
	if (!relay && pvec != NULL && pc_backup >= 1 &&
			pc_backup <= MAPVEC_NR + 3) {
		pv_test = TRUE;

		memcpy(pv_backup, pvec, sizeof(*pvec) * pc_backup);
	}

	/* Reset the test result. */
	success = TRUE;

	/* Perform the vumap call, either directly or through a relay. */
	if (relay) {
		assert(endpt == SELF);

		r = relay_vumap(vvec, vcount, offset, access, pvec, pcount);
	} else {
		r = sys_vumap(endpt, vvec, vcount, offset, access, pvec,
			pcount);
	}

	/* Upon failure, pvec and pcount must be unchanged. */
	if (r != OK) {
		expect(pc_backup == *pcount);

		if (pv_test)
			expect(memcmp(pv_backup, pvec,
				sizeof(*pvec) * pc_backup) == 0);
	}

	return r;
}
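
/* Each test case below fills vvec[] with virtual ranges, sets pcount to
 * the capacity it offers in pvec[], and calls do_vumap(); on success,
 * sys_vumap() stores one entry in pvec[] per physically contiguous stretch
 * of the input, and updates pcount accordingly.
 */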
static void test_basics(void)
{
	struct vumap_vir vvec[2];
	struct vumap_phys pvec[4];
	struct buf buf[4];
	int r, pcount;

	test_group("basics");

	buf[0].pages = 1;
	buf[0].flags = BUF_PREALLOC;
	buf[1].pages = 2;
	buf[1].flags = BUF_PREALLOC;
	buf[2].pages = 1;
	buf[2].flags = BUF_PREALLOC;
	buf[3].pages = 1;
	buf[3].flags = BUF_PREALLOC | BUF_ADJACENT;

	alloc_bufs(buf, 4);

	/* Test single whole page. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys);
	expect(pvec[0].vp_size == vvec[0].vv_size);

	got_result("single whole page");

	/* Test single partial page. */
	vvec[0].vv_addr = buf[0].addr + 123;
	vvec[0].vv_size = PAGE_SIZE - 456;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys + 123);
	expect(pvec[0].vp_size == vvec[0].vv_size);

	got_result("single partial page");

	/* Test multiple contiguous whole pages. */
	vvec[0].vv_addr = buf[1].addr;
	vvec[0].vv_size = PAGE_SIZE * 2;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[1].phys);
	expect(pvec[0].vp_size == vvec[0].vv_size);

	got_result("multiple contiguous whole pages");

	/* Test range in multiple contiguous pages. */
	vvec[0].vv_addr = buf[1].addr + 234;
	vvec[0].vv_size = PAGE_SIZE * 2 - 234;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[1].phys + 234);
	expect(pvec[0].vp_size == vvec[0].vv_size);

	got_result("range in multiple contiguous pages");

	/* Test multiple noncontiguous whole pages. */
	vvec[0].vv_addr = buf[2].addr;
	vvec[0].vv_size = PAGE_SIZE * 2;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 2);
	expect(pvec[0].vp_addr == buf[2].phys);
	expect(pvec[0].vp_size == PAGE_SIZE);
	expect(pvec[1].vp_addr == buf[3].phys);
	expect(pvec[1].vp_size == PAGE_SIZE);

	got_result("multiple noncontiguous whole pages");

	/* Test range in multiple noncontiguous pages. */
	vvec[0].vv_addr = buf[2].addr + 1;
	vvec[0].vv_size = PAGE_SIZE * 2 - 2;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, 0, VUA_WRITE, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 2);
	expect(pvec[0].vp_addr == buf[2].phys + 1);
	expect(pvec[0].vp_size == PAGE_SIZE - 1);
	expect(pvec[1].vp_addr == buf[3].phys);
	expect(pvec[1].vp_size == PAGE_SIZE - 1);

	got_result("range in multiple noncontiguous pages");

	/* Test single-input result truncation. */
	vvec[0].vv_addr = buf[2].addr + PAGE_SIZE / 2;
	vvec[0].vv_size = PAGE_SIZE;
	pvec[1].vp_addr = 0L;
	pvec[1].vp_size = 0;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[2].phys + PAGE_SIZE / 2);
	expect(pvec[0].vp_size == PAGE_SIZE / 2);
	expect(pvec[1].vp_addr == 0L);
	expect(pvec[1].vp_size == 0);

	got_result("single-input result truncation");
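
	/* Note: result truncation is not an error: the call returns OK, and
	 * entries beyond the given pcount must be left untouched, which is
	 * why pvec[1] is preset to known values above.
	 */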

	/* Test multiple inputs, contiguous first. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	vvec[1].vv_addr = buf[2].addr + PAGE_SIZE - 1;
	vvec[1].vv_size = 2;
	pcount = 3;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 3);
	expect(pvec[0].vp_addr == buf[0].phys);
	expect(pvec[0].vp_size == PAGE_SIZE);
	expect(pvec[1].vp_addr == buf[2].phys + PAGE_SIZE - 1);
	expect(pvec[1].vp_size == 1);
	expect(pvec[2].vp_addr == buf[3].phys);
	expect(pvec[2].vp_size == 1);

	got_result("multiple inputs, contiguous first");

	/* Test multiple inputs, contiguous last. */
	vvec[0].vv_addr = buf[2].addr + 123;
	vvec[0].vv_size = PAGE_SIZE * 2 - 456;
	vvec[1].vv_addr = buf[1].addr + 234;
	vvec[1].vv_size = PAGE_SIZE * 2 - 345;
	pcount = 4;

	r = do_vumap(SELF, vvec, 2, 0, VUA_WRITE, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 3);
	expect(pvec[0].vp_addr == buf[2].phys + 123);
	expect(pvec[0].vp_size == PAGE_SIZE - 123);
	expect(pvec[1].vp_addr == buf[3].phys);
	expect(pvec[1].vp_size == PAGE_SIZE - (456 - 123));
	expect(pvec[2].vp_addr == buf[1].phys + 234);
	expect(pvec[2].vp_size == vvec[1].vv_size);

	got_result("multiple inputs, contiguous last");

	/* Test multiple-inputs result truncation. */
	vvec[0].vv_addr = buf[2].addr + 2;
	vvec[0].vv_size = PAGE_SIZE * 2 - 3;
	vvec[1].vv_addr = buf[0].addr;
	vvec[1].vv_size = 135;
	pvec[2].vp_addr = 0xDEADBEEFL;
	pvec[2].vp_size = 1234;
	pcount = 2;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 2);
	expect(pvec[0].vp_addr == buf[2].phys + 2);
	expect(pvec[0].vp_size == PAGE_SIZE - 2);
	expect(pvec[1].vp_addr == buf[3].phys);
	expect(pvec[1].vp_size == PAGE_SIZE - 1);
	expect(pvec[2].vp_addr == 0xDEADBEEFL);
	expect(pvec[2].vp_size == 1234);

	got_result("multiple-inputs result truncation");

	free_bufs(buf, 4);
}

static void test_endpt(void)
{
	struct vumap_vir vvec[1];
	struct vumap_phys pvec[1];
	struct buf buf[1];
	int r, pcount;

	test_group("endpoint");

	buf[0].pages = 1;
	buf[0].flags = BUF_PREALLOC;

	alloc_bufs(buf, 1);

	/* Test NONE endpoint. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	pcount = 1;

	r = do_vumap(NONE, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("NONE endpoint");

	/* Test ANY endpoint. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	pcount = 1;

	r = do_vumap(ANY, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("ANY endpoint");

	free_bufs(buf, 1);
}

static void test_vector1(void)
{
	struct vumap_vir vvec[2];
	struct vumap_phys pvec[3];
	struct buf buf[2];
	int r, pcount;

	test_group("vector, part 1");

	buf[0].pages = 2;
	buf[0].flags = BUF_PREALLOC;
	buf[1].pages = 1;
	buf[1].flags = BUF_PREALLOC;

	alloc_bufs(buf, 2);

	/* Test zero virtual memory size. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 2;
	vvec[1].vv_addr = buf[1].addr;
	vvec[1].vv_size = 0;
	pcount = 3;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("zero virtual memory size");

	/* Test excessive virtual memory size. */
	vvec[1].vv_size = (vir_bytes) -1;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT || r == EPERM);

	got_result("excessive virtual memory size");

	/* Test invalid virtual memory. */
	vvec[1].vv_addr = 0L;
	vvec[1].vv_size = PAGE_SIZE;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT);

	got_result("invalid virtual memory");

	/* Test virtual memory overrun. */
	vvec[0].vv_size++;
	vvec[1].vv_addr = buf[1].addr;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT);

	got_result("virtual memory overrun");

	free_bufs(buf, 2);
}

static void test_vector2(void)
{
	struct vumap_vir vvec[2], *vvecp;
	struct vumap_phys pvec[3], *pvecp;
	struct buf buf[2];
	phys_bytes dummy;
	int r, pcount;

	test_group("vector, part 2");

	buf[0].pages = 2;
	buf[0].flags = BUF_PREALLOC;
	buf[1].pages = 1;
	buf[1].flags = BUF_PREALLOC;

	alloc_bufs(buf, 2);

	/* Test zero virtual count. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 2;
	vvec[1].vv_addr = buf[1].addr;
	vvec[1].vv_size = PAGE_SIZE;
	pcount = 3;

	r = do_vumap(SELF, vvec, 0, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("zero virtual count");

	/* Test negative virtual count. */
	r = do_vumap(SELF, vvec, -1, 0, VUA_WRITE, pvec, &pcount);

	expect(r == EINVAL);

	got_result("negative virtual count");

	/* Test zero physical count. */
	pcount = 0;

	r = do_vumap(SELF, vvec, 2, 0, VUA_WRITE, pvec, &pcount);

	expect(r == EINVAL);

	got_result("zero physical count");

	/* Test negative physical count. */
	pcount = -1;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("negative physical count");

	/* Test invalid virtual vector pointer. */
	pcount = 2;

	r = do_vumap(SELF, NULL, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT);

	got_result("invalid virtual vector pointer");

	/* Test unallocated virtual vector. */
	vvecp = (struct vumap_vir *) minix_mmap(NULL, PAGE_SIZE,
		PROT_READ | PROT_WRITE, MAP_ANON, -1, 0L);

	if (vvecp == MAP_FAILED)
		panic("unable to allocate virtual vector");

	r = do_vumap(SELF, vvecp, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT);
	expect(!is_allocated((vir_bytes) vvecp, PAGE_SIZE, &dummy));

	got_result("unallocated virtual vector pointer");

	minix_munmap((void *) vvecp, PAGE_SIZE);

	/* Test invalid physical vector pointer. */
	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, NULL, &pcount);

	expect(r == EFAULT);

	got_result("invalid physical vector pointer");

	/* Test unallocated physical vector. */
	pvecp = (struct vumap_phys *) minix_mmap(NULL, PAGE_SIZE,
		PROT_READ | PROT_WRITE, MAP_ANON, -1, 0L);

	if (pvecp == MAP_FAILED)
		panic("unable to allocate physical vector");

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvecp, &pcount);

	expect(r == OK);
	expect(is_allocated((vir_bytes) pvecp, PAGE_SIZE, &dummy));
	expect(pcount == 2);
	expect(pvecp[0].vp_size == PAGE_SIZE * 2);
	expect(pvecp[0].vp_addr == buf[0].phys);
	expect(pvecp[1].vp_size == PAGE_SIZE);
	expect(pvecp[1].vp_addr == buf[1].phys);

	got_result("unallocated physical vector pointer");

	minix_munmap((void *) pvecp, PAGE_SIZE);

	free_bufs(buf, 2);
}
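
/* The grant tests run in relay mode only: relay_vumap() normally derives
 * the grants' CPF_READ/CPF_WRITE bits from the requested VUA_ access bits,
 * and grant_access/grant_exception let us force a mismatch or a broken
 * grant instead.
 */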
static void test_grant(void)
{
	struct vumap_vir vvec[2];
	struct vumap_phys pvec[3];
	struct buf buf[2];
	int r, pcount;

	test_group("grants");

	buf[0].pages = 1;
	buf[0].flags = BUF_PREALLOC;
	buf[1].pages = 2;
	buf[1].flags = BUF_PREALLOC;

	alloc_bufs(buf, 2);

	/* Test write-only access on read-only grant. */
	grant_access = CPF_READ;	/* override */

	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, 0, VUA_WRITE, pvec, &pcount);

	expect(r == EPERM);

	got_result("write-only access on read-only grant");

	/* Test read-write access on read-only grant. */
	r = do_vumap(SELF, vvec, 1, 0, VUA_READ | VUA_WRITE, pvec, &pcount);

	expect(r == EPERM);

	got_result("read-write access on read-only grant");

	/* Test read-only access on write-only grant. */
	grant_access = CPF_WRITE;	/* override */

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == EPERM);

	got_result("read-only access on write-only grant");

	/* Test read-write access on write grant. */
	r = do_vumap(SELF, vvec, 1, 0, VUA_READ | VUA_WRITE, pvec, &pcount);

	expect(r == EPERM);

	got_result("read-write access on write-only grant");

	/* Test read-only access on read-write grant. */
	grant_access = CPF_READ | CPF_WRITE;	/* override */

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_size == PAGE_SIZE);
	expect(pvec[0].vp_addr == buf[0].phys);

	got_result("read-only access on read-write grant");

	grant_access = 0;	/* reset */

	/* Test invalid grant. */
	grant_exception = GE_INVALID;

	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	vvec[1].vv_addr = buf[1].addr;
	vvec[1].vv_size = PAGE_SIZE * 2;
	pcount = 3;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("invalid grant");

	/* Test revoked grant. */
	grant_exception = GE_REVOKED;

	r = do_vumap(SELF, vvec, 2, 0, VUA_READ, pvec, &pcount);

	expect(r == EPERM);

	got_result("revoked grant");

	grant_exception = GE_NONE;

	free_bufs(buf, 2);
}

static void test_offset(void)
{
	struct vumap_vir vvec[2];
	struct vumap_phys pvec[3];
	struct buf buf[4];
	size_t off, off2;
	int r, pcount;

	test_group("offsets");

	buf[0].pages = 1;
	buf[0].flags = BUF_PREALLOC;
	buf[1].pages = 2;
	buf[1].flags = BUF_PREALLOC;
	buf[2].pages = 1;
	buf[2].flags = BUF_PREALLOC;
	buf[3].pages = 1;
	buf[3].flags = BUF_PREALLOC | BUF_ADJACENT;

	alloc_bufs(buf, 4);

	/* Test offset into aligned page. */
	off = 123;
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, off, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys + off);
	expect(pvec[0].vp_size == vvec[0].vv_size - off);

	got_result("offset into aligned page");

	/* Test offset into unaligned page. */
	off2 = 456;
	assert(off + off2 < PAGE_SIZE);
	vvec[0].vv_addr = buf[0].addr + off;
	vvec[0].vv_size = PAGE_SIZE - off;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, off2, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys + off + off2);
	expect(pvec[0].vp_size == vvec[0].vv_size - off2);

	got_result("offset into unaligned page");

	/* Test offset into unaligned page set. */
	assert(off + off2 < PAGE_SIZE);
	vvec[0].vv_addr = buf[1].addr + off;
	vvec[0].vv_size = (PAGE_SIZE - off) * 2;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, off2, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[1].phys + off + off2);
	expect(pvec[0].vp_size == vvec[0].vv_size - off2);

	got_result("offset into contiguous page set");

	/* Test offset into noncontiguous page set. */
	vvec[0].vv_addr = buf[2].addr + off;
	vvec[0].vv_size = (PAGE_SIZE - off) * 2;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, off2, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 2);
	expect(pvec[0].vp_addr == buf[2].phys + off + off2);
	expect(pvec[0].vp_size == PAGE_SIZE - off - off2);
	expect(pvec[1].vp_addr == buf[3].phys);
	expect(pvec[1].vp_size == PAGE_SIZE - off);

	got_result("offset into noncontiguous page set");

	/* Test offset to last byte. */
	off = PAGE_SIZE - off2 - 1;
	vvec[0].vv_addr = buf[0].addr + off2;
	vvec[0].vv_size = PAGE_SIZE - off2;
	pcount = 1;

	r = do_vumap(SELF, vvec, 1, off, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys + off + off2);
	expect(pvec[0].vp_size == 1);

	got_result("offset to last byte");

	/* Test offset at range end. */
	off = 123;
	vvec[0].vv_addr = buf[1].addr + off;
	vvec[0].vv_size = PAGE_SIZE - off * 2;
	vvec[1].vv_addr = vvec[0].vv_addr + vvec[0].vv_size;
	vvec[1].vv_size = off;
	pcount = 2;

	r = do_vumap(SELF, vvec, 2, vvec[0].vv_size, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("offset at range end");

	/* Test offset beyond range end. */
	vvec[0].vv_addr = buf[1].addr;
	vvec[0].vv_size = PAGE_SIZE;
	vvec[1].vv_addr = buf[1].addr + PAGE_SIZE;
	vvec[1].vv_size = PAGE_SIZE;

	r = do_vumap(SELF, vvec, 2, PAGE_SIZE + off, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("offset beyond range end");

	/* Test negative offset. */
	vvec[0].vv_addr = buf[1].addr + off + off2;
	vvec[0].vv_size = PAGE_SIZE;

	r = do_vumap(SELF, vvec, 1, (size_t) -1, VUA_READ, pvec, &pcount);

	expect(r == EINVAL);

	got_result("negative offset");

	free_bufs(buf, 4);
}
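
/* test_access() relies on the lazy allocation behavior set up below:
 * ranges mapped without write access must not fault in unallocated pages
 * (the call fails instead), whereas VUA_WRITE forces the pages to be
 * faulted in before their physical addresses are returned.
 */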
static void test_access(void)
{
	struct vumap_vir vvec[3];
	struct vumap_phys pvec[4], *pvecp;
	struct buf buf[7];
	int i, r, pcount, pindex;

	test_group("access");

	buf[0].pages = 1;
	buf[0].flags = 0;
	buf[1].pages = 1;
	buf[1].flags = BUF_PREALLOC | BUF_ADJACENT;
	buf[2].pages = 1;
	buf[2].flags = BUF_ADJACENT;

	alloc_bufs(buf, 3);

	/* Test no access flags. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 3;
	pcount = 4;

	r = do_vumap(SELF, vvec, 1, 0, 0, pvec, &pcount);

	expect(r == EINVAL);
	expect(!is_buf_allocated(&buf[0]));
	expect(is_buf_allocated(&buf[1]));
	expect(!is_buf_allocated(&buf[2]));

	got_result("no access flags");

	/* Test read-only access. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 3;
	pcount = 4;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == EFAULT);
	expect(!is_buf_allocated(&buf[0]));
	expect(is_buf_allocated(&buf[1]));
	expect(!is_buf_allocated(&buf[2]));

	got_result("read-only access");

	/* Test read-write access. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 3;
	pcount = 4;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ | VUA_WRITE, pvec, &pcount);

	expect(r == EFAULT);
	expect(!is_buf_allocated(&buf[0]));
	expect(is_buf_allocated(&buf[1]));
	expect(!is_buf_allocated(&buf[2]));

	got_result("read-write access");

	/* Test write-only access. */
	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = PAGE_SIZE * 3;
	pcount = 4;

	r = do_vumap(SELF, vvec, 1, 0, VUA_WRITE, pvec, &pcount);

	expect(r == OK);
	/* We don't control the physical addresses of the faulted-in pages, so
	 * they may or may not end up being contiguous with their neighbours.
	 */
	expect(pcount >= 1 && pcount <= 3);
	expect(is_buf_allocated(&buf[0]));
	expect(is_buf_allocated(&buf[1]));
	expect(is_buf_allocated(&buf[2]));
	expect(pvec[0].vp_addr == buf[0].phys);
	switch (pcount) {
	case 1:
		expect(pvec[0].vp_size == PAGE_SIZE * 3);
		break;
	case 2:
		expect(pvec[0].vp_size + pvec[1].vp_size == PAGE_SIZE * 3);
		if (pvec[0].vp_size > PAGE_SIZE)
			expect(pvec[1].vp_addr == buf[2].phys);
		else
			expect(pvec[1].vp_addr == buf[1].phys);
		break;
	case 3:
		expect(pvec[0].vp_size == PAGE_SIZE);
		expect(pvec[1].vp_addr == buf[1].phys);
		expect(pvec[1].vp_size == PAGE_SIZE);
		expect(pvec[2].vp_addr == buf[2].phys);
		expect(pvec[2].vp_size == PAGE_SIZE);
		break;
	}

	got_result("write-only access");

	free_bufs(buf, 3);
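
	/* The page faulting test below covers three ranges at once: the last
	 * byte of buf[0] plus most of buf[1], all of buf[2], and an unaligned
	 * stretch across buf[3] through buf[5]. The result vector itself is
	 * stored in the unallocated buf[6], so that writing the results
	 * faults in that page, too.
	 */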

	/* Test page faulting. */
	buf[0].pages = 1;
	buf[0].flags = 0;
	buf[1].pages = 1;
	buf[1].flags = BUF_PREALLOC | BUF_ADJACENT;
	buf[2].pages = 1;
	buf[2].flags = 0;
	buf[3].pages = 2;
	buf[3].flags = BUF_PREALLOC;
	buf[4].pages = 1;
	buf[4].flags = BUF_ADJACENT;
	buf[5].pages = 1;
	buf[5].flags = BUF_ADJACENT;
	buf[6].pages = 1;
	buf[6].flags = 0;

	alloc_bufs(buf, 7);

	vvec[0].vv_addr = buf[0].addr + PAGE_SIZE - 1;
	vvec[0].vv_size = PAGE_SIZE - 1;
	vvec[1].vv_addr = buf[2].addr;
	vvec[1].vv_size = PAGE_SIZE;
	vvec[2].vv_addr = buf[3].addr + 123;
	vvec[2].vv_size = PAGE_SIZE * 4 - 456;
	pvecp = (struct vumap_phys *) buf[6].addr;
	pcount = 6;
	assert(sizeof(struct vumap_phys) * pcount <= PAGE_SIZE);

	r = do_vumap(SELF, vvec, 3, 0, VUA_WRITE, pvecp, &pcount);

	expect(r == OK);
	/* Same story but more possibilities. I hope I got this right. */
	expect(pcount >= 3 && pcount <= 6);
	for (i = 0; i < 7; i++)
		expect(is_buf_allocated(&buf[i]));
	expect(pvecp[0].vp_addr == buf[0].phys + PAGE_SIZE - 1);
	if (pvecp[0].vp_size == 1) {
		expect(pvecp[1].vp_addr == buf[1].phys);
		expect(pvecp[1].vp_size == PAGE_SIZE - 2);
		pindex = 2;
	} else {
		expect(pvecp[0].vp_size == PAGE_SIZE - 1);
		pindex = 1;
	}
	expect(pvecp[pindex].vp_addr == buf[2].phys);
	expect(pvecp[pindex].vp_size == PAGE_SIZE);
	pindex++;
	expect(pvecp[pindex].vp_addr == buf[3].phys + 123);
	switch (pcount - pindex) {
	case 1:
		expect(pvecp[pindex].vp_size == PAGE_SIZE * 4 - 456);
		break;
	case 2:
		if (pvecp[pindex].vp_size > PAGE_SIZE * 2 - 123) {
			expect(pvecp[pindex].vp_size == PAGE_SIZE * 3 - 123);
			expect(pvecp[pindex + 1].vp_addr == buf[5].phys);
			expect(pvecp[pindex + 1].vp_size ==
				PAGE_SIZE - (456 - 123));
		} else {
			expect(pvecp[pindex].vp_size == PAGE_SIZE * 2 - 123);
			expect(pvecp[pindex + 1].vp_addr == buf[4].phys);
			expect(pvecp[pindex + 1].vp_size ==
				PAGE_SIZE * 2 - (456 - 123));
		}
		break;
	case 3:
		expect(pvecp[pindex].vp_size == PAGE_SIZE * 2 - 123);
		expect(pvecp[pindex + 1].vp_addr == buf[4].phys);
		expect(pvecp[pindex + 1].vp_size == PAGE_SIZE);
		expect(pvecp[pindex + 2].vp_addr == buf[5].phys);
		expect(pvecp[pindex + 2].vp_size == PAGE_SIZE - (456 - 123));
		break;
	}

	got_result("page faulting");

	free_bufs(buf, 7);

	/* MISSING: tests to see whether a request with VUA_WRITE or
	 * (VUA_READ|VUA_WRITE) correctly gets an EFAULT for a read-only page.
	 * As of writing, support for such protection is missing from the
	 * system.
	 */
}
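
/* Helper for the physical limit tests below: perform a read vumap on the
 * given ranges and check that the result was truncated to exactly
 * MAPVEC_NR single-page entries matching the given buffers.
 */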
static void phys_limit(struct vumap_vir *vvec, int vcount,
	struct vumap_phys *pvec, int pcount, struct buf *buf, char *desc)
{
	int i, r;

	r = do_vumap(SELF, vvec, vcount, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR);
	for (i = 0; i < MAPVEC_NR; i++) {
		expect(pvec[i].vp_addr == buf[i].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result(desc);
}

static void test_limits(void)
{
	struct vumap_vir vvec[MAPVEC_NR + 3];
	struct vumap_phys pvec[MAPVEC_NR + 3];
	struct buf buf[MAPVEC_NR + 9];
	int i, r, vcount, pcount, nr_bufs;

	test_group("limits");

	/* Test large contiguous range. */
	buf[0].pages = MAPVEC_NR + 2;
	buf[0].flags = BUF_PREALLOC;

	alloc_bufs(buf, 1);

	vvec[0].vv_addr = buf[0].addr;
	vvec[0].vv_size = (MAPVEC_NR + 2) * PAGE_SIZE;
	pcount = 2;

	r = do_vumap(SELF, vvec, 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == 1);
	expect(pvec[0].vp_addr == buf[0].phys);
	expect(pvec[0].vp_size == vvec[0].vv_size);

	got_result("large contiguous range");

	free_bufs(buf, 1);

	/* I'd like to test MAPVEC_NR contiguous ranges of MAPVEC_NR pages
	 * each, but chances are we don't have that much contiguous memory
	 * available at all. In fact, the previous test may already fail
	 * because of this.
	 */
	for (i = 0; i < MAPVEC_NR + 2; i++) {
		buf[i].pages = 1;
		buf[i].flags = BUF_PREALLOC;
	}

	buf[i].pages = 1;
	buf[i].flags = BUF_PREALLOC | BUF_ADJACENT;

	alloc_bufs(buf, MAPVEC_NR + 3);

	/* Test virtual limit, one below. */
	for (i = 0; i < MAPVEC_NR + 2; i++) {
		vvec[i].vv_addr = buf[i].addr;
		vvec[i].vv_size = PAGE_SIZE;
	}
	vvec[i - 1].vv_size += PAGE_SIZE;

	pcount = MAPVEC_NR + 3;

	r = do_vumap(SELF, vvec, MAPVEC_NR - 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR - 1);
	for (i = 0; i < MAPVEC_NR - 1; i++) {
		expect(pvec[i].vp_addr == buf[i].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("virtual limit, one below");

	/* Test virtual limit, exact match. */
	pcount = MAPVEC_NR + 3;

	r = do_vumap(SELF, vvec, MAPVEC_NR, 0, VUA_WRITE, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR);
	for (i = 0; i < MAPVEC_NR; i++) {
		expect(pvec[i].vp_addr == buf[i].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("virtual limit, exact match");

	/* Test virtual limit, one above. */
	pcount = MAPVEC_NR + 3;

	r = do_vumap(SELF, vvec, MAPVEC_NR + 1, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR);
	for (i = 0; i < MAPVEC_NR; i++) {
		expect(pvec[i].vp_addr == buf[i].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("virtual limit, one above");

	/* Test virtual limit, two above. */
	pcount = MAPVEC_NR + 3;

	r = do_vumap(SELF, vvec, MAPVEC_NR + 2, 0, VUA_WRITE, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR);
	for (i = 0; i < MAPVEC_NR; i++) {
		expect(pvec[i].vp_addr == buf[i].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("virtual limit, two above");

	/* Test physical limit, one below, aligned. */
	pcount = MAPVEC_NR - 1;

	r = do_vumap(SELF, vvec + 2, MAPVEC_NR, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR - 1);
	for (i = 0; i < MAPVEC_NR - 1; i++) {
		expect(pvec[i].vp_addr == buf[i + 2].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("physical limit, one below, aligned");

	/* Test physical limit, one below, unaligned. */
	pcount = MAPVEC_NR - 1;

	r = do_vumap(SELF, vvec + 3, MAPVEC_NR, 0, VUA_READ, pvec, &pcount);

	expect(r == OK);
	expect(pcount == MAPVEC_NR - 1);
	for (i = 0; i < MAPVEC_NR - 1; i++) {
		expect(pvec[i].vp_addr == buf[i + 3].phys);
		expect(pvec[i].vp_size == PAGE_SIZE);
	}

	got_result("physical limit, one below, unaligned");

	free_bufs(buf, MAPVEC_NR + 3);

	nr_bufs = sizeof(buf) / sizeof(buf[0]);

	/* This ends up looking in our virtual address space as follows:
	 * [P] [P] [P] [PPP] [PPP] ...(MAPVEC_NR x [PPP])... [PPP]
	 * ..where P is a page, and the blocks are virtually contiguous.
	 */
	for (i = 0; i < nr_bufs; i += 3) {
		buf[i].pages = 1;
		buf[i].flags = BUF_PREALLOC;
		buf[i + 1].pages = 1;
		buf[i + 1].flags =
			BUF_PREALLOC | ((i >= 3) ? BUF_ADJACENT : 0);
		buf[i + 2].pages = 1;
		buf[i + 2].flags =
			BUF_PREALLOC | ((i >= 3) ? BUF_ADJACENT : 0);
	}

	alloc_bufs(buf, nr_bufs);

	for (i = 0; i < 3; i++) {
		vvec[i].vv_addr = buf[i].addr;
		vvec[i].vv_size = PAGE_SIZE;
	}
	for ( ; i < nr_bufs / 3 + 1; i++) {
		vvec[i].vv_addr = buf[(i - 2) * 3].addr;
		vvec[i].vv_size = PAGE_SIZE * 3;
	}
	vcount = i;

	/* Out of each of the following tests, one will be aligned (that is,
	 * the last pvec entry will be for the last page in a vvec entry) and
	 * two will be unaligned.
	 */

	/* Test physical limit, exact match. */
	phys_limit(vvec, vcount, pvec, MAPVEC_NR, buf,
		"physical limit, exact match, try 1");
	phys_limit(vvec + 1, vcount - 1, pvec, MAPVEC_NR, buf + 1,
		"physical limit, exact match, try 2");
	phys_limit(vvec + 2, vcount - 2, pvec, MAPVEC_NR, buf + 2,
		"physical limit, exact match, try 3");

	/* Test physical limit, one above. */
	phys_limit(vvec, vcount, pvec, MAPVEC_NR + 1, buf,
		"physical limit, one above, try 1");
	phys_limit(vvec + 1, vcount - 1, pvec, MAPVEC_NR + 1, buf + 1,
		"physical limit, one above, try 2");
	phys_limit(vvec + 2, vcount - 2, pvec, MAPVEC_NR + 1, buf + 2,
		"physical limit, one above, try 3");

	/* Test physical limit, two above. */
	phys_limit(vvec, vcount, pvec, MAPVEC_NR + 2, buf,
		"physical limit, two above, try 1");
	phys_limit(vvec + 1, vcount - 1, pvec, MAPVEC_NR + 2, buf + 1,
		"physical limit, two above, try 2");
	phys_limit(vvec + 2, vcount - 2, pvec, MAPVEC_NR + 2, buf + 2,
		"physical limit, two above, try 3");

	free_bufs(buf, nr_bufs);
}
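
/* Run the test set twice: once issuing sys_vumap() directly, and once
 * through the vumaprelay service, using grants; a few tests apply to only
 * one of the two modes.
 */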
static void do_tests(int use_relay)
{
	relay = use_relay;

	test_basics();

	if (!relay) test_endpt();	/* local only */

	test_vector1();

	if (!relay) test_vector2();	/* local only */

	if (relay) test_grant();	/* remote only */

	test_offset();

	test_access();

	test_limits();
}

static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *UNUSED(info))
{
	int r;

	verbose = (env_argc > 1 && !strcmp(env_argv[1], "-v"));

	if (verbose)
		printf("Starting sys_vumap test set\n");

	do_tests(FALSE /*use_relay*/);

	if ((r = ds_retrieve_label_endpt("vumaprelay", &endpt)) != OK)
		panic("unable to obtain endpoint for 'vumaprelay' (%d)", r);

	do_tests(TRUE /*use_relay*/);

	printf("Completed sys_vumap test set, %u/%u tests failed\n",
		failures, count);

	/* The returned code will determine the outcome of the RS call, and
	 * thus the entire test. The actual error code does not matter.
	 */
	return (failures) ? EINVAL : OK;
}

static void sef_local_startup(void)
{
	sef_setcb_init_fresh(sef_cb_init_fresh);

	sef_startup();
}

int main(int argc, char **argv)
{
	env_setargs(argc, argv);

	sef_local_startup();

	return 0;
}