#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>
#include <minix/hash.h>

#include <sys/param.h>

#include "sanitycheck.h"
static yielded_t *lru_youngest = NULL, *lru_oldest = NULL;
/* Should a physblock be mapped writable? */
#define WRITABLE(r, pb) \
	(((r)->flags & VR_WRITABLE) && \
		(((r)->flags & (VR_DIRECT | VR_SHARED)) || \
		 (pb)->refcount == 1))
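/*
 * Illustrative sketch (not part of the original source): WRITABLE()
 * maps a block writable in two cases. A VR_DIRECT or VR_SHARED region
 * is writable whenever the region itself is; a private anonymous
 * region only once the block is no longer shared (refcount == 1),
 * i.e. after copy-on-write gave the faulting process its own copy:
 *
 *	struct vir_region r;	r.flags = VR_WRITABLE | VR_ANON;
 *	struct phys_block pb;	pb.refcount = 2;
 *	WRITABLE(&r, &pb);	// 0: still shared; a write must fault first
 *	pb.refcount = 1;
 *	WRITABLE(&r, &pb);	// 1: sole owner; map the page writable
 */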
static int map_new_physblock(struct vmproc *vmp, struct vir_region
	*region, vir_bytes offset, vir_bytes length, phys_bytes what, u32_t
	allocflags, int written);

static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr);

static phys_bytes freeyieldednode(yielded_t *node, int freemem);
static struct vir_region *map_copy_region(struct vmproc *vmp, struct
	vir_region *vr);

static struct phys_region *map_clone_ph_block(struct vmproc *vmp,
	struct vir_region *region, struct phys_region *ph, physr_iter *iter);
static void lrucheck(void);

/* hash table of yielded blocks */
#define YIELD_HASHSIZE 65536
static yielded_avl vm_yielded_blocks[YIELD_HASHSIZE];

static int avl_inited = 0;
void map_region_init(void)
{
	int h;

	assert(!avl_inited);

	for(h = 0; h < YIELD_HASHSIZE; h++)
		yielded_init(&vm_yielded_blocks[h]);

	avl_inited = 1;
}
static yielded_avl *get_yielded_avl(block_id_t id)
{
	u32_t h;

	assert(avl_inited);

	hash_i_64(id.owner, id.id, h);
	h = h % YIELD_HASHSIZE;

	assert(h < YIELD_HASHSIZE);

	return &vm_yielded_blocks[h];
}
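/*
 * Illustrative sketch (not part of the original source): a yielded
 * block is keyed on (owner endpoint, 64-bit id), so different owners
 * can reuse the same id. Two keys may still share a bucket; the
 * yielded_block_cmp() function further down disambiguates entries
 * within the per-bucket AVL:
 *
 *	block_id_t bid;
 *	bid.owner = vmp->vm_endpoint;
 *	bid.id = id;
 *	avl = get_yielded_avl(bid);
 */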
static char *map_name(struct vir_region *vr)
{
	static char name[100];
	char *typename, *tag;
	int type = vr->flags & (VR_ANON|VR_DIRECT);
	switch(type) {
		case VR_ANON:
			typename = "anonymous";
			break;
		case VR_DIRECT:
			typename = "direct";
			break;
		default:
			panic("unknown mapping type: %d", type);
	}

	switch(vr->tag) {
		default:
			tag = "unknown tag value";
			break;
	}

	sprintf(name, "%s, %s", typename, tag);

	return name;
}
void map_printregion(struct vmproc *vmp, struct vir_region *vr)
{
	physr_iter iter;
	struct phys_region *ph;
	printf("map_printmap: map_name: %s\n", map_name(vr));
	printf("\t%lx (len 0x%lx, %lukB), %p\n",
		vr->vaddr, vr->length, vr->length/1024, map_name(vr));
	printf("\t\tphysblocks:\n");
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		printf("\t\t@ %lx (refs %d): phys 0x%lx\n",
			(vr->vaddr + ph->offset),
			ph->ph->refcount, ph->ph->phys);
		physr_incr_iter(&iter);
	}
}
/*===========================================================================*
 *				map_printmap				     *
 *===========================================================================*/
void map_printmap(vmp)
struct vmproc *vmp;
{
	struct vir_region *vr;
	region_iter iter;

	printf("memory regions in process %d:\n", vmp->vm_endpoint);

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		map_printregion(vmp, vr);
		region_incr_iter(&iter);
	}
}
static struct vir_region *getnextvr(struct vir_region *vr)
{
	struct vir_region *nextvr;
	region_iter v_iter;

	region_start_iter(&vr->parent->vm_regions_avl, &v_iter, vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter));
	assert(region_get_iter(&v_iter) == vr);
	region_incr_iter(&v_iter);
	nextvr = region_get_iter(&v_iter);
	if(!nextvr) return NULL;

	assert(vr->parent == nextvr->parent);
	assert(vr->vaddr < nextvr->vaddr);
	assert(vr->vaddr + vr->length <= nextvr->vaddr);

	return nextvr;
}
/*===========================================================================*
 *				map_sanitycheck_pt			     *
 *===========================================================================*/
static int map_sanitycheck_pt(struct vmproc *vmp,
	struct vir_region *vr, struct phys_region *pr)
{
	struct phys_block *pb = pr->ph;
	int rw;
	int r;

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	r = pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
		pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw, WMF_VERIFY);

	if(r != OK) {
		printf("proc %d phys_region 0x%lx sanity check failed\n",
			vmp->vm_endpoint, pr->offset);
		map_printregion(vmp, vr);
	}

	return r;
}
/*===========================================================================*
 *				map_sanitycheck				     *
 *===========================================================================*/
void map_sanitycheck(char *file, int line)
{
	struct vmproc *vmp;

/* Macro for looping over all physical blocks of all regions of
 * all processes.
 */
#define ALLREGIONS(regioncode, physcode)				\
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) {		\
		region_iter v_iter;					\
		struct vir_region *vr;					\
		if(!(vmp->vm_flags & VMF_INUSE))			\
			continue;					\
		region_start_iter_least(&vmp->vm_regions_avl, &v_iter);	\
		while((vr = region_get_iter(&v_iter))) {		\
			physr_iter iter;				\
			struct phys_region *pr;				\
			regioncode;					\
			physr_start_iter_least(vr->phys, &iter);	\
			while((pr = physr_get_iter(&iter))) {		\
				physcode;				\
				physr_incr_iter(&iter);			\
			}						\
			region_incr_iter(&v_iter);			\
		}							\
	}

#define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
	/* Basic pointers check. */
	ALLREGIONS(MYSLABSANE(vr),MYSLABSANE(pr); MYSLABSANE(pr->ph);MYSLABSANE(pr->parent));
	ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr->parent == vr););

	/* Do counting for consistency check. */
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
		if(pr->ph->seencount == 1) {
			if(!(pr->parent->flags & VR_DIRECT)) {
				MYASSERT(usedpages_add(pr->ph->phys,
					VM_PAGE_SIZE) == OK);
			}
		}
	);

	/* Do consistency check. */
	ALLREGIONS({ struct vir_region *nextvr = getnextvr(vr);
		if(nextvr) {
			MYASSERT(vr->vaddr < nextvr->vaddr);
			MYASSERT(vr->vaddr + vr->length <= nextvr->vaddr);
		}
	}
	MYASSERT(!(vr->vaddr % VM_PAGE_SIZE));,
	if(pr->ph->refcount != pr->ph->seencount) {
		map_printmap(vmp);
		printf("ph in vr %p: 0x%lx refcount %u "
			"but seencount %u\n",
			vr, pr->offset,
			pr->ph->refcount, pr->ph->seencount);
	}
	{
		int n_others = 0;
		struct phys_region *others;
		if(pr->ph->refcount > 0) {
			MYASSERT(pr->ph->firstregion);
			if(pr->ph->refcount == 1) {
				MYASSERT(pr->ph->firstregion == pr);
			}
		} else {
			MYASSERT(!pr->ph->firstregion);
		}
		for(others = pr->ph->firstregion; others;
			others = others->next_ph_list) {
			MYSLABSANE(others);
			MYASSERT(others->ph == pr->ph);
			n_others++;
		}
		MYASSERT(pr->ph->refcount == n_others);
	}
	MYASSERT(pr->ph->refcount == pr->ph->seencount);
	MYASSERT(!(pr->offset % VM_PAGE_SIZE)););
	ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp, vr, pr) == OK));
}
#define LRUCHECK lrucheck()
static void lrucheck(void)
{
	yielded_t *list;

	/* list is empty and ok if both ends point to null. */
	if(!lru_youngest && !lru_oldest)
		return;

	/* if not, both should point to something. */
	SLABSANE(lru_youngest);
	SLABSANE(lru_oldest);

	assert(!lru_youngest->younger);
	assert(!lru_oldest->older);

	for(list = lru_youngest; list; list = list->older) {
		SLABSANE(list);
		if(list->younger) {
			SLABSANE(list->younger);
			assert(list->younger->older == list);
		} else	assert(list == lru_youngest);
		if(list->older) {
			SLABSANE(list->older);
			assert(list->older->younger == list);
		} else	assert(list == lru_oldest);
	}
}
void blockstats(void)
{
	yielded_t *list;
	int blocks = 0;
	phys_bytes mem = 0;
	clock_t ticks;
	int s;

	s = getuptime(&ticks);

	assert(s == OK);

	for(list = lru_youngest; list; list = list->older) {
		mem += VM_PAGE_SIZE;
		blocks++;
	}

	printf("%d blocks, %lukB; ", blocks, mem/1024);
}
/*=========================================================================*
 *				map_ph_writept				*
 *=========================================================================*/
static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr)
{
	int rw;
	struct phys_block *pb = pr->ph;

	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(pr->offset % VM_PAGE_SIZE));
	assert(pb->refcount > 0);

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	if(pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
			pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw,
			WMF_OVERWRITE) != OK) {
		printf("VM: map_writept: pt_writemap failed\n");
		return ENOMEM;
	}

	USE(pr, pr->written = 1;);

	return OK;
}
#define SLOT_FAIL ((vir_bytes) -1)
/*===========================================================================*
 *				region_find_slot_range			     *
 *===========================================================================*/
static vir_bytes region_find_slot_range(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	struct vir_region *lastregion;
	vir_bytes startv = 0;
	int foundflag = 0;
	region_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Length must be reasonable. */
	assert(length > 0);

	/* Special case: allow caller to set maxv to 0 meaning 'I want
	 * it to be mapped in right here.'
	 */
	if(maxv == 0) {
		maxv = minv + length;

		/* Sanity check. */
		if(maxv <= minv) {
			printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
				minv, length);
			return SLOT_FAIL;
		}
	}

	/* Basic input sanity checks. */
	assert(!(length % VM_PAGE_SIZE));
	if(minv >= maxv) {
		printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
			minv, maxv, length);
	}
	assert(minv < maxv);
	assert(minv + length <= maxv);

#define FREEVRANGE_TRY(rangestart, rangeend) {			\
	vir_bytes frstart = (rangestart), frend = (rangeend);	\
	frstart = MAX(frstart, minv);				\
	frend = MIN(frend, maxv);				\
	if(frend > frstart && (frend - frstart) >= length) {	\
		startv = frend-length;				\
		foundflag = 1;					\
	} }

#define FREEVRANGE(start, end) {					\
	assert(!foundflag);						\
	FREEVRANGE_TRY(((start)+VM_PAGE_SIZE), ((end)-VM_PAGE_SIZE));	\
	if(!foundflag) {						\
		FREEVRANGE_TRY((start), (end));				\
	} }

	/* find region after maxv. */
	region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_GREATER_EQUAL);
	lastregion = region_get_iter(&iter);

	if(!lastregion) {
		/* This is the free virtual address space after the last region. */
		region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_LESS);
		lastregion = region_get_iter(&iter);
		FREEVRANGE(lastregion ?
			lastregion->vaddr+lastregion->length : 0, VM_DATATOP);
	}

	if(!foundflag) {
		struct vir_region *vr;
		while((vr = region_get_iter(&iter)) && !foundflag) {
			struct vir_region *nextvr;
			region_decr_iter(&iter);
			nextvr = region_get_iter(&iter);
			FREEVRANGE(nextvr ? nextvr->vaddr+nextvr->length : 0,
				vr->vaddr);
		}
	}

	if(!foundflag) {
		printf("VM: region_find_slot: no 0x%lx bytes found for %d between 0x%lx and 0x%lx\n",
			length, vmp->vm_endpoint, minv, maxv);
		return SLOT_FAIL;
	}

	/* However we got it, startv must be in the requested range. */
	assert(startv >= minv);
	assert(startv < maxv);
	assert(startv + length <= maxv);

	/* remember this position as a hint for next time. */
	vmp->vm_region_top = startv + length;

	return startv;
}
/*===========================================================================*
 *				region_find_slot			     *
 *===========================================================================*/
static vir_bytes region_find_slot(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	vir_bytes v, hint = vmp->vm_region_top;

	/* use the top of the last inserted region as a minv hint if
	 * possible. remember that a zero maxv is a special case.
	 */
	if(maxv && hint < maxv && hint >= minv) {
		v = region_find_slot_range(vmp, minv, hint, length);

		if(v != SLOT_FAIL)
			return v;
	}

	return region_find_slot_range(vmp, minv, maxv, length);
}
struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv, vir_bytes length, int flags)
{
	physr_avl *phavl;
	struct vir_region *newregion;

	if(!(SLABALLOC(newregion))) {
		printf("vm: region_new: could not allocate\n");
		return NULL;
	}

	/* Fill in node details. */
USE(newregion,
	newregion->vaddr = startv;
	newregion->length = length;
	newregion->flags = flags;
	newregion->tag = VRT_NONE;
	newregion->lower = newregion->higher = NULL;
	newregion->parent = vmp;);

	SLABALLOC(phavl);
	if(!phavl) {
		printf("VM: region_new: allocating phys avl failed\n");
		SLABFREE(newregion);
		return NULL;
	}
	USE(newregion, newregion->phys = phavl;);

	physr_init(newregion->phys);

	return newregion;
}
/*===========================================================================*
 *				map_page_region				     *
 *===========================================================================*/
struct vir_region *map_page_region(vmp, minv, maxv, length,
	what, flags, mapflags)
struct vmproc *vmp;
vir_bytes minv;
vir_bytes maxv;
vir_bytes length;
phys_bytes what;
u32_t flags;
int mapflags;
{
	struct vir_region *newregion;
	vir_bytes startv;

	assert(!(length % VM_PAGE_SIZE));

	SANITYCHECK(SCL_FUNCTIONS);

	if((flags & VR_CONTIG) && !(mapflags & MF_PREALLOC)) {
		printf("map_page_region: can't make contiguous allocation without preallocating\n");
		return NULL;
	}

	startv = region_find_slot(vmp, minv, maxv, length);
	if (startv == SLOT_FAIL)
		return NULL;

	/* Now we want a new region. */
	if(!(newregion = region_new(vmp, startv, length, flags))) {
		printf("VM: map_page_region: allocating region failed\n");
		return NULL;
	}

	/* If we know what we're going to map to, map it right away. */
	if(what != MAP_NONE) {
		assert(!(what % VM_PAGE_SIZE));
		assert(!(startv % VM_PAGE_SIZE));
		assert(!(mapflags & MF_PREALLOC));
		if(map_new_physblock(vmp, newregion, 0, length,
			what, PAF_CLEAR, 0) != OK) {
			printf("VM: map_new_physblock failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	if((flags & VR_ANON) && (mapflags & MF_PREALLOC)) {
		if(map_handle_memory(vmp, newregion, 0, length, 1) != OK) {
			printf("VM: map_page_region: prealloc failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	/* Pre-allocations should be uninitialized, but after that it's a
	 * different story.
	 */
	USE(newregion, newregion->flags &= ~VR_UNINITIALIZED;);

	/* Link it. */
	region_insert(&vmp->vm_regions_avl, newregion);

	assert(startv == newregion->vaddr);
	{
		struct vir_region *nextvr;
		if((nextvr = getnextvr(newregion))) {
			assert(newregion->vaddr < nextvr->vaddr);
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return newregion;
}
static struct phys_region *reset_physr_iter(struct vir_region *region,
	physr_iter *iter, vir_bytes offset)
{
	struct phys_region *ph;

	physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
	ph = physr_get_iter(iter);
	assert(ph);
	assert(ph->offset == offset);

	return ph;
}
/*===========================================================================*
 *				map_subfree				     *
 *===========================================================================*/
static int map_subfree(struct vir_region *region, vir_bytes len)
{
	struct phys_region *pr;
	physr_iter iter;

	SLABSANE(region->phys);
	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		struct phys_region *others;
		struct phys_block *pb;

		pb = pr->ph;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
		}
		physr_incr_iter(&iter);
	}

	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		physr_incr_iter(&iter);
		if(pr->offset >= len)
			break;
		if(pr->offset + VM_PAGE_SIZE <= len) {
			pb_unreferenced(region, pr);
			physr_start_iter_least(region->phys, &iter);
			SLABFREE(pr);
		}
	}

	return OK;
}
/*===========================================================================*
 *				map_free				     *
 *===========================================================================*/
static int map_free(struct vir_region *region)
{
	int r;

	if((r=map_subfree(region, region->length)) != OK) {
		printf("%d\n", __LINE__);
		return r;
	}

	USE(region,
		SLABFREE(region->phys););
	SLABFREE(region);

	return OK;
}
/*===========================================================================*
 *				yielded_block_cmp			     *
 *===========================================================================*/
int yielded_block_cmp(struct block_id *id1, struct block_id *id2)
{
	if(id1->owner < id2->owner)
		return -1;
	if(id1->owner > id2->owner)
		return 1;
	return cmp64(id1->id, id2->id);
}
/*===========================================================================*
 *				free_yielded_proc			     *
 *===========================================================================*/
static vir_bytes free_yielded_proc(struct vmproc *vmp)
{
	vir_bytes total = 0;
	int h;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Free associated regions. */
	for(h = 0; h < YIELD_HASHSIZE && vmp->vm_yielded > 0; h++) {
		yielded_t *yb;
		yielded_t *next_yb;
		yielded_iter iter;
		yielded_avl *avl = &vm_yielded_blocks[h];
		yielded_start_iter_least(avl, &iter);
		while((yb = yielded_get_iter(&iter))) {
			yielded_incr_iter(&iter);
			if(yb->id.owner != vmp->vm_endpoint)
				continue;
			next_yb = yielded_get_iter(&iter);
			total += freeyieldednode(yb, 1);
			/* the above removal invalidated our iter; restart it
			 * for the node we want to start at.
			 */
			if(!next_yb) break;
			yielded_start_iter(avl, &iter, next_yb->id, AVL_EQUAL);
			assert(yielded_get_iter(&iter) == next_yb);
		}
	}

	return total;
}
static phys_bytes freeyieldednode(yielded_t *node, int freemem)
{
	yielded_t *older, *younger, *removed;
	yielded_avl *avl;
	int p;

	/* Update LRU. */

	younger = node->younger;
	older = node->older;

	if(younger) {
		assert(younger->older == node);
		USE(younger, younger->older = node->older;);
	} else {
		assert(node == lru_youngest);
		lru_youngest = node->older;
	}

	if(older) {
		assert(older->younger == node);
		USE(older, older->younger = node->younger;);
	} else {
		assert(node == lru_oldest);
		lru_oldest = node->younger;
	}

	/* Update AVL. */

	if(vm_isokendpt(node->id.owner, &p) != OK)
		panic("out of date owner of yielded block %d", node->id.owner);
	avl = get_yielded_avl(node->id);
	removed = yielded_remove(avl, node->id);
	assert(removed == node);
	assert(vmproc[p].vm_yielded > 0);
	vmproc[p].vm_yielded--;

	/* Free associated memory if requested. */
	if(freemem) {
		free_mem(ABS2CLICK(node->addr), 1);
	}

	/* Free the node itself and report the page freed. */
	SLABFREE(node);

	return VM_PAGE_SIZE;
}
/*========================================================================*
 *				free_yielded				  *
 *========================================================================*/
vir_bytes free_yielded(vir_bytes max_bytes)
{
/* PRIVATE yielded_t *lru_youngest = NULL, *lru_oldest = NULL; */
	vir_bytes freed = 0;

	while(freed < max_bytes && lru_oldest) {
		SLABSANE(lru_oldest);
		freed += freeyieldednode(lru_oldest, 1);
	}

	return freed;
}
/*========================================================================*
 *				map_free_proc				  *
 *========================================================================*/
int map_free_proc(vmp)
struct vmproc *vmp;
{
	struct vir_region *r;

	while((r = region_search_root(&vmp->vm_regions_avl))) {
		SANITYCHECK(SCL_DETAIL);
		region_remove(&vmp->vm_regions_avl, r->vaddr); /* For sanity checks. */
		map_free(r);
		SANITYCHECK(SCL_DETAIL);
	}

	region_init(&vmp->vm_regions_avl);

	/* Free associated yielded blocks. */
	free_yielded_proc(vmp);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*===========================================================================*
 *				map_lookup				     *
 *===========================================================================*/
struct vir_region *map_lookup(vmp, offset)
struct vmproc *vmp;
vir_bytes offset;
{
	struct vir_region *r;

	SANITYCHECK(SCL_FUNCTIONS);

	if(!region_search_root(&vmp->vm_regions_avl))
		panic("process has no regions: %d", vmp->vm_endpoint);

	if((r = region_search(&vmp->vm_regions_avl, offset, AVL_LESS_EQUAL))) {
		if(offset >= r->vaddr && offset < r->vaddr + r->length)
			return r;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return NULL;
}
static u32_t vrallocflags(u32_t flags)
{
	u32_t allocflags = 0;

	if(flags & VR_PHYS64K)
		allocflags |= PAF_ALIGN64K;
	if(flags & VR_LOWER16MB)
		allocflags |= PAF_LOWER16MB;
	if(flags & VR_LOWER1MB)
		allocflags |= PAF_LOWER1MB;
	if(flags & VR_CONTIG)
		allocflags |= PAF_CONTIG;

	return allocflags;
}
/*===========================================================================*
 *				map_new_physblock			     *
 *===========================================================================*/
static int map_new_physblock(vmp, region, start_offset, length,
	what_mem, allocflags, written)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes start_offset;
vir_bytes length;
phys_bytes what_mem;
u32_t allocflags;
int written;
{
	struct memlist *memlist, *ml;
	int r = OK;
	vir_bytes mapped = 0;
	vir_bytes offset = start_offset;

	SANITYCHECK(SCL_FUNCTIONS);

	assert(!(length % VM_PAGE_SIZE));

	if((region->flags & VR_CONTIG) &&
		(start_offset > 0 || length < region->length)) {
		printf("VM: region length 0x%lx, offset 0x%lx length 0x%lx\n",
			region->length, start_offset, length);
		printf("VM: map_new_physblock: non-full contig allocation requested\n");
		return EFAULT;
	}

	/* Memory for new physical block. */
	allocflags |= vrallocflags(region->flags);

	if(allocflags & PAF_CONTIG) {
		assert(what_mem == MAP_NONE);
		if((what_mem = alloc_mem(length/VM_PAGE_SIZE, allocflags)) == NO_MEM) {
			return ENOMEM;
		}
		what_mem = CLICK2ABS(what_mem);
		allocflags &= ~PAF_CONTIG;
		assert(what_mem != MAP_NONE);
	}

	if(!(memlist = alloc_mem_in_list(length, allocflags, what_mem))) {
		printf("map_new_physblock: couldn't allocate\n");
		return ENOMEM;
	}

	for(ml = memlist; ml; ml = ml->next) {
		struct phys_region *newphysr = NULL;
		struct phys_block *newpb = NULL;

		/* Allocate things necessary for this chunk of memory. */
		if(!(newpb = pb_new(ml->phys)) ||
		   !(newphysr = pb_reference(newpb, offset, region))) {
			printf("map_new_physblock: no memory for the ph slabs\n");
			if(newphysr) SLABFREE(newphysr);
			if(newpb) SLABFREE(newpb);
			r = ENOMEM;
			break;
		}

		/* Update pagetable. */
		if(map_ph_writept(vmp, region, newphysr) != OK) {
			printf("map_new_physblock: map_ph_writept failed\n");
			r = ENOMEM;
			break;
		}

		offset += VM_PAGE_SIZE;
		mapped += VM_PAGE_SIZE;
	}

	if(r != OK) {
		offset = start_offset;
		/* Things did not go well. Undo everything. */
		for(ml = memlist; ml; ml = ml->next) {
			struct phys_region *physr;
			if((physr = physr_search(region->phys, offset,
				AVL_EQUAL))) {
				assert(physr->ph->refcount == 1);
				pb_unreferenced(region, physr);
				SLABFREE(physr);
			}
			offset += VM_PAGE_SIZE;
		}
	} else assert(mapped == length);

	/* Always clean up the memlist itself, even if everything
	 * worked we're not using the memlist nodes any more. And
	 * the memory they reference is either freed above or in use.
	 */
	free_mem_list(memlist, 0);

	SANITYCHECK(SCL_FUNCTIONS);

	return r;
}
/*===========================================================================*
 *				map_clone_ph_block			     *
 *===========================================================================*/
static struct phys_region *map_clone_ph_block(vmp, region, ph, iter)
struct vmproc *vmp;
struct vir_region *region;
struct phys_region *ph;
physr_iter *iter;
{
	vir_bytes offset;
	u32_t allocflags;
	phys_bytes physaddr;
	struct phys_region *newpr;
	int region_has_single_block;
	int written;

	written = ph->written;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Warning: this function will free the passed
	 * phys_region *ph and replace it (in the same offset)
	 * with another! So both the pointer to it
	 * and any iterators over the phys_regions in the vir_region
	 * will be invalid on successful return. (Iterators over
	 * the vir_region could be invalid on unsuccessful return too.)
	 */

	/* This is only to be done if there is more than one copy. */
	assert(ph->ph->refcount > 1);

	/* This function takes a physical block, copies its contents
	 * into newly allocated memory, and replaces the single physical
	 * block by one or more physical blocks with refcount 1 with the
	 * same contents as the original. In other words, a fragmentable
	 * version of map_copy_ph_block().
	 */

	/* Remember where and how much. */
	offset = ph->offset;
	physaddr = ph->ph->phys;

	/* Now unlink the original physical block so we can replace
	 * it with new one(s) at the same offset.
	 */
	assert(ph->ph->refcount > 1);
	pb_unreferenced(region, ph);
	assert(ph->ph->refcount >= 1);
	SLABFREE(ph);

	SANITYCHECK(SCL_DETAIL);

	/* Put new free memory in. */
	allocflags = vrallocflags(region->flags);
	region_has_single_block = (offset == 0 && region->length == VM_PAGE_SIZE);
	assert(region_has_single_block || !(allocflags & PAF_CONTIG));
	assert(!(allocflags & PAF_CLEAR));

	if(map_new_physblock(vmp, region, offset, VM_PAGE_SIZE,
		MAP_NONE, allocflags, written) != OK) {
		/* XXX original range now gone. */
		printf("VM: map_clone_ph_block: map_new_physblock failed.\n");
		return NULL;
	}

	/* Copy the block to the new memory.
	 * Can only fail if map_new_physblock didn't do what we asked.
	 */
	if(copy_abs2region(physaddr, region, offset, VM_PAGE_SIZE) != OK)
		panic("copy_abs2region failed, no good reason for that");

	newpr = physr_search(region->phys, offset, AVL_EQUAL);
	assert(newpr);
	assert(newpr->offset == offset);

	if(iter) {
		physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
		assert(physr_get_iter(iter) == newpr);
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return newpr;
}
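/*
 * Illustrative sketch (not part of the original source): callers that
 * are iterating pass their iterator so it is re-seated on the
 * replacement phys_region; a one-off clone such as the copy-on-write
 * fault path passes NULL:
 *
 *	if(!(ph = map_clone_ph_block(vmp, region, ph, NULL)))	// map_pf
 *		return ENOMEM;
 *	physr = map_clone_ph_block(vmp, region, physr, &iter);	// scans
 */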
/*===========================================================================*
 *				map_pf					     *
 *===========================================================================*/
int map_pf(vmp, region, offset, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset;
int write;
{
	vir_bytes virpage;
	struct phys_region *ph;
	int r = OK;

	assert(offset >= 0);
	assert(offset < region->length);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));

	virpage = offset - offset % VM_PAGE_SIZE;

	SANITYCHECK(SCL_FUNCTIONS);

	if((ph = physr_search(region->phys, offset, AVL_LESS_EQUAL)) &&
	   (ph->offset <= offset && offset < ph->offset + VM_PAGE_SIZE)) {
		/* Pagefault in existing block. Do copy-on-write. */
		assert(region->flags & VR_WRITABLE);
		assert(ph->ph->refcount > 0);

		if(WRITABLE(region, ph->ph)) {
			r = map_ph_writept(vmp, region, ph);
			if(r != OK)
				printf("map_ph_writept failed\n");
		} else {
			if(ph->ph->refcount > 0
				&& ph->ph->share_flag != PBSH_COW) {
				printf("VM: write RO mapped pages.\n");
				return EFAULT;
			} else {
				if(!map_clone_ph_block(vmp, region, ph, NULL))
					r = ENOMEM;
			}
		}
	} else {
		/* Pagefault in non-existing block. Map in new block. */
		if(map_new_physblock(vmp, region, virpage,
			VM_PAGE_SIZE, MAP_NONE, PAF_CLEAR, 0) != OK) {
			printf("map_new_physblock failed\n");
			r = ENOMEM;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(r != OK) {
		printf("VM: map_pf: failed (%d)\n", r);
		return r;
	}

	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+virpage,
		VM_PAGE_SIZE, write)) {
		panic("map_pf: pt_checkrange failed: %d", r);
	}

	return r;
}
/*===========================================================================*
 *				map_pin_memory				     *
 *===========================================================================*/
int map_pin_memory(struct vmproc *vmp)
{
	struct vir_region *vr;
	int r;
	region_iter iter;

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	/* Scan all memory regions. */
	while((vr = region_get_iter(&iter))) {
		/* Make sure region is mapped to physical memory and writable.*/
		r = map_handle_memory(vmp, vr, 0, vr->length, 1);
		if(r != OK) {
			panic("map_pin_memory: map_handle_memory failed: %d", r);
		}
		region_incr_iter(&iter);
	}

	return OK;
}
/*===========================================================================*
 *				map_handle_memory			     *
 *===========================================================================*/
int map_handle_memory(vmp, region, offset, length, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset, length;
int write;
{
	struct phys_region *physr, *nextphysr;
	int changes = 0;
	physr_iter iter;
	u32_t allocflags = 0;

	if(!(region->flags & VR_UNINITIALIZED)) {
		allocflags = PAF_CLEAR;
	}

#define FREE_RANGE_HERE(er1, er2) {					\
	struct phys_region *r1 = (er1), *r2 = (er2);			\
	vir_bytes start = offset, end = offset + length;		\
	if(r1) {							\
		start = MAX(start, r1->offset + VM_PAGE_SIZE); }	\
	if(r2) {							\
		end = MIN(end, r2->offset); }				\
	if(start < end) {						\
		SANITYCHECK(SCL_DETAIL);				\
		if(map_new_physblock(vmp, region, start,		\
			end-start, MAP_NONE, allocflags, 0) != OK) {	\
			SANITYCHECK(SCL_DETAIL);			\
			return ENOMEM;					\
		}							\
		changes++;						\
	} }

	SANITYCHECK(SCL_FUNCTIONS);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));
	assert(!(offset % VM_PAGE_SIZE));
	assert(!(length % VM_PAGE_SIZE));
	assert(!write || (region->flags & VR_WRITABLE));

	physr_start_iter(region->phys, &iter, offset, AVL_LESS_EQUAL);
	physr = physr_get_iter(&iter);

	if(!physr) {
		physr_start_iter(region->phys, &iter, offset, AVL_GREATER_EQUAL);
		physr = physr_get_iter(&iter);
	}

	FREE_RANGE_HERE(NULL, physr);

	if(physr) {
		physr = reset_physr_iter(region, &iter, physr->offset);
		if(physr->offset + VM_PAGE_SIZE <= offset) {
			physr_incr_iter(&iter);
			physr = physr_get_iter(&iter);

			FREE_RANGE_HERE(NULL, physr);
			if(physr) {
				physr = reset_physr_iter(region, &iter,
					physr->offset);
			}
		}
	}

	while(physr) {
		int r;

		SANITYCHECK(SCL_DETAIL);

		if(write) {
			assert(physr->ph->refcount > 0);
			if(!WRITABLE(region, physr->ph)) {
				if(!(physr = map_clone_ph_block(vmp, region,
					physr, &iter))) {
					printf("VM: map_handle_memory: no copy\n");
					return ENOMEM;
				}
				changes++;
			} else {
				SANITYCHECK(SCL_DETAIL);
				if((r=map_ph_writept(vmp, region, physr)) != OK) {
					printf("VM: map_ph_writept failed\n");
					return r;
				}
				changes++;
				SANITYCHECK(SCL_DETAIL);
			}
		}

		SANITYCHECK(SCL_DETAIL);
		physr_incr_iter(&iter);
		nextphysr = physr_get_iter(&iter);
		FREE_RANGE_HERE(physr, nextphysr);
		SANITYCHECK(SCL_DETAIL);
		if(nextphysr) {
			if(nextphysr->offset >= offset + length)
				break;
			nextphysr = reset_physr_iter(region, &iter,
				nextphysr->offset);
		}
		physr = nextphysr;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(changes < 1) {
		printf("region start at 0x%lx offset 0x%lx len 0x%lx write %d\n",
			region->vaddr, offset, length, write);
		printf("no changes in map_handle_memory\n");
		return EFAULT;
	}

	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset, length, write)) {
		printf("handle mem 0x%lx-0x%lx failed\n",
			region->vaddr+offset,region->vaddr+offset+length);
		map_printregion(vmp, region);
		panic("checkrange failed");
	}

	return OK;
}
static int count_phys_regions(struct vir_region *vr)
{
	int n = 0;
	struct phys_region *ph;
	physr_iter iter;
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		n++;
		physr_incr_iter(&iter);
	}
	return n;
}
/*===========================================================================*
 *				map_copy_region				     *
 *===========================================================================*/
static struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region *vr)
{
	/* map_copy_region creates a complete copy of the vir_region
	 * data structure, linking in the same phys_blocks directly,
	 * but all in limbo, i.e., the caller has to link the vir_region
	 * to a process. Therefore it doesn't increase the refcount in
	 * the phys_block; the caller has to do this once it's linked.
	 * The reason for this is to keep the sanity checks working
	 * within this function.
	 */
	struct vir_region *newvr;
	struct phys_region *ph;
	physr_iter iter;
	int cr;

	cr = count_phys_regions(vr);

	if(!(newvr = region_new(vr->parent, vr->vaddr, vr->length, vr->flags)))
		return NULL;

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		struct phys_region *newph = pb_reference(ph->ph, ph->offset, newvr);

		if(!newph) { map_free(newvr); return NULL; }

		USE(newph, newph->written = 0;);
		assert(count_phys_regions(vr) == cr);
		physr_incr_iter(&iter);
	}

	assert(count_phys_regions(vr) == count_phys_regions(newvr));

	return newvr;
}
/*===========================================================================*
 *				copy_abs2region				     *
 *===========================================================================*/
int copy_abs2region(phys_bytes abs, struct vir_region *destregion,
	phys_bytes offset, phys_bytes len)
{
	assert(destregion);
	assert(destregion->phys);
	while(len > 0) {
		phys_bytes sublen, suboffset;
		struct phys_region *ph;
		assert(destregion);
		assert(destregion->phys);
		if(!(ph = physr_search(destregion->phys, offset, AVL_LESS_EQUAL))) {
			printf("VM: copy_abs2region: no phys region found (1).\n");
			return EFAULT;
		}
		assert(ph->offset <= offset);
		if(ph->offset+VM_PAGE_SIZE <= offset) {
			printf("VM: copy_abs2region: no phys region found (2).\n");
			return EFAULT;
		}
		suboffset = offset - ph->offset;
		assert(suboffset < VM_PAGE_SIZE);
		sublen = len;
		if(sublen > VM_PAGE_SIZE - suboffset)
			sublen = VM_PAGE_SIZE - suboffset;
		assert(suboffset + sublen <= VM_PAGE_SIZE);
		if(ph->ph->refcount != 1) {
			printf("VM: copy_abs2region: refcount not 1.\n");
			return EFAULT;
		}

		if(sys_abscopy(abs, ph->ph->phys + suboffset, sublen) != OK) {
			printf("VM: copy_abs2region: abscopy failed.\n");
			return EFAULT;
		}
		abs += sublen;
		offset += sublen;
		len -= sublen;
	}

	return OK;
}
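/*
 * Illustrative sketch (not part of the original source): a copy that
 * straddles a page boundary is split into one sys_abscopy() per
 * phys_region, with sublen clamped to the page remainder each round:
 *
 *	// last 100 bytes of the first page, first 156 of the next
 *	copy_abs2region(srcphys, region, VM_PAGE_SIZE - 100, 256);
 */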
/*=========================================================================*
 *				map_writept				*
 *=========================================================================*/
int map_writept(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *ph;
	int r;
	region_iter v_iter;
	physr_iter ph_iter;

	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		physr_start_iter_least(vr->phys, &ph_iter);

		while((ph = physr_get_iter(&ph_iter))) {
			physr_incr_iter(&ph_iter);

			/* If this phys block is shared as SMAP, then do
			 * not update the page table. */
			if(ph->ph->refcount > 1
				&& ph->ph->share_flag == PBSH_SMAP) {
				continue;
			}

			if((r=map_ph_writept(vmp, vr, ph)) != OK) {
				printf("VM: map_writept: failed\n");
				return r;
			}
		}
		region_incr_iter(&v_iter);
	}

	return OK;
}
1459 *========================================================================*/
1460 int map_proc_copy(dst
, src
)
1464 /* Copy all the memory regions from the src process to the dst process. */
1465 region_init(&dst
->vm_regions_avl
);
1467 return map_proc_copy_from(dst
, src
, NULL
);
/*========================================================================*
 *			     map_proc_copy_from	     			  *
 *========================================================================*/
int map_proc_copy_from(dst, src, start_src_vr)
struct vmproc *dst;
struct vmproc *src;
struct vir_region *start_src_vr;
{
	struct vir_region *vr;
	region_iter v_iter;

	if(!start_src_vr)
		start_src_vr = region_search_least(&src->vm_regions_avl);

	assert(start_src_vr);
	assert(start_src_vr->parent == src);
	region_start_iter(&src->vm_regions_avl, &v_iter,
		start_src_vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter) == start_src_vr);

	/* Copy source regions after the destination's last region (if any). */

	SANITYCHECK(SCL_FUNCTIONS);

	while((vr = region_get_iter(&v_iter))) {
		physr_iter iter_orig, iter_new;
		struct vir_region *newvr;
		struct phys_region *orig_ph, *new_ph;
		if(!(newvr = map_copy_region(dst, vr))) {
			map_free_proc(dst);
			return ENOMEM;
		}
		USE(newvr, newvr->parent = dst;);
		region_insert(&dst->vm_regions_avl, newvr);
		physr_start_iter_least(vr->phys, &iter_orig);
		physr_start_iter_least(newvr->phys, &iter_new);
		while((orig_ph = physr_get_iter(&iter_orig))) {
			struct phys_block *pb;
			new_ph = physr_get_iter(&iter_new);
			/* Check two physregions both are nonnull,
			 * are different, and match physblocks.
			 */
			assert(new_ph);
			assert(orig_ph);
			assert(orig_ph != new_ph);
			pb = orig_ph->ph;
			assert(orig_ph->ph == new_ph->ph);

			/* If the phys block has been shared as SMAP,
			 * do the regular copy. */
			if(pb->refcount > 2 && pb->share_flag == PBSH_SMAP) {
				map_clone_ph_block(dst, newvr,new_ph,
					&iter_new);
			} else {
				USE(pb, pb->share_flag = PBSH_COW;);
			}

			/* Get next new physregion */
			physr_incr_iter(&iter_orig);
			physr_incr_iter(&iter_new);
		}
		assert(!physr_get_iter(&iter_new));
		region_incr_iter(&v_iter);
	}

	map_writept(src);
	map_writept(dst);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
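/*
 * Illustrative sketch (not part of the original source): fork() relies
 * on this pair. After the copy, parent and child share every
 * phys_block marked PBSH_COW, and the map_writept() calls above have
 * write-protected both page tables, so the first write from either
 * side faults into map_pf() and clones just that page:
 *
 *	if(map_proc_copy(child, parent) != OK)
 *		panic("fork: can't copy memory map");
 */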
int map_region_extend_upto_v(struct vmproc *vmp, vir_bytes v)
{
	vir_bytes offset = v, end;
	struct vir_region *vr, *nextvr;
	int r;

	if(!(vr = region_search(&vmp->vm_regions_avl, offset, AVL_LESS))) {
		printf("VM: nothing to extend\n");
		return ENOMEM;
	}

	if(!(vr->flags & VR_ANON)) {
		printf("VM: memory range to extend not anonymous\n");
		return ENOMEM;
	}

	assert(vr->vaddr <= offset);
	if((nextvr = getnextvr(vr))) {
		assert(offset <= nextvr->vaddr);
	}

	end = vr->vaddr + vr->length;

	offset = roundup(offset, VM_PAGE_SIZE);

	if(end < offset)
		r = map_region_extend(vmp, vr, offset - end);
	else
		r = OK;

	return r;
}
/*========================================================================*
 *				map_region_extend	     		  *
 *========================================================================*/
int map_region_extend(struct vmproc *vmp, struct vir_region *vr,
	vir_bytes delta)
{
	vir_bytes end;
	struct vir_region *nextvr;

	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));
	if(vr->flags & VR_CONTIG) {
		printf("VM: can't grow contig region\n");
		return EFAULT;
	}

	if(!delta) return OK;
	end = vr->vaddr + vr->length;
	assert(end >= vr->vaddr);

	if(end + delta <= end) {
		printf("VM: strange delta 0x%lx\n", delta);
		return ENOMEM;
	}

	nextvr = getnextvr(vr);

	if(!nextvr || end + delta <= nextvr->vaddr) {
		USE(vr, vr->length += delta;);
		return OK;
	}

	return ENOMEM;
}
/*========================================================================*
 *				map_region_shrink	     		  *
 *========================================================================*/
int map_region_shrink(struct vir_region *vr, vir_bytes delta)
{
	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));

	printf("VM: ignoring region shrink\n");

	return OK;
}
struct vir_region *map_region_lookup_tag(vmp, tag)
struct vmproc *vmp;
u32_t tag;
{
	struct vir_region *vr;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		if(vr->tag == tag)
			return vr;
		region_incr_iter(&v_iter);
	}

	return NULL;
}
void map_region_set_tag(struct vir_region *vr, u32_t tag)
{
	USE(vr, vr->tag = tag;);
}

u32_t map_region_get_tag(struct vir_region *vr)
{
	return vr->tag;
}
/*========================================================================*
 *				map_unmap_region	     	  	  *
 *========================================================================*/
int map_unmap_region(struct vmproc *vmp, struct vir_region *r,
	vir_bytes len)
{
/* Shrink the region by 'len' bytes, from the start. Unreference
 * memory it used to reference if any.
 */
	vir_bytes regionstart;

	SANITYCHECK(SCL_FUNCTIONS);

	if(len > r->length || (len % VM_PAGE_SIZE)) {
		printf("VM: bogus length 0x%lx\n", len);
		return EINVAL;
	}

	if(!(r->flags & (VR_ANON|VR_DIRECT))) {
		printf("VM: only unmap anonymous or direct memory\n");
		return EINVAL;
	}

	regionstart = r->vaddr;

	if(len == r->length) {
		SANITYCHECK(SCL_DETAIL);
		/* Whole region disappears. Unlink and free it. */
		region_remove(&vmp->vm_regions_avl, r->vaddr);
		map_free(r);
	} else {
		struct phys_region *pr;
		physr_iter iter;
		/* Region shrinks. First unreference its memory
		 * and then shrink the region.
		 */
		SANITYCHECK(SCL_DETAIL);
		map_subfree(r, len);
		USE(r,
			r->vaddr += len;
			r->length -= len;);
		physr_start_iter_least(r->phys, &iter);

		/* vaddr has increased; to make all the phys_regions
		 * point to the same addresses, make them shrink by the
		 * same amount.
		 */
		while((pr = physr_get_iter(&iter))) {
			assert(pr->offset >= len);
			USE(pr, pr->offset -= len;);
			physr_incr_iter(&iter);
		}
	}

	SANITYCHECK(SCL_DETAIL);

	if(pt_writemap(vmp, &vmp->vm_pt, regionstart,
	  MAP_NONE, len, 0, WMF_OVERWRITE) != OK) {
		printf("VM: map_unmap_region: pt_writemap failed\n");
		return ENOMEM;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
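/*
 * Illustrative sketch (not part of the original source): unmapping the
 * first page of a three-page region leaves a two-page region whose
 * vaddr moved up one page and whose phys_region offsets were rebased
 * to match:
 *
 *	map_unmap_region(vmp, r, VM_PAGE_SIZE);
 *	// r->vaddr += VM_PAGE_SIZE, r->length -= VM_PAGE_SIZE
 */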
/*========================================================================*
 *				map_remap				  *
 *========================================================================*/
int map_remap(struct vmproc *dvmp, vir_bytes da, size_t size,
	struct vir_region *region, vir_bytes *r, int readonly)
{
	struct vir_region *vr;
	struct phys_region *ph;
	vir_bytes startv, dst_addr;
	physr_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	assert(region->flags & VR_SHARED);

	/* da is handled differently */
	if (!da)
		dst_addr = 0;
	else
		dst_addr = da;

	/* round up to page size */
	assert(!(size % VM_PAGE_SIZE));
	startv = region_find_slot(dvmp, dst_addr, VM_DATATOP, size);
	if (startv == SLOT_FAIL) {
		return ENOMEM;
	}
	/* when the user specifies the address, we cannot change it */
	if (da && (startv != dst_addr))
		return EINVAL;

	vr = map_copy_region(dvmp, region);
	if(!vr)
		return ENOMEM;

	USE(vr,
		vr->vaddr = startv;
		vr->length = size;
		vr->flags = region->flags;
		vr->tag = VRT_NONE;
		vr->parent = dvmp;
		if(readonly) {
			vr->flags &= ~VR_WRITABLE;
		}
	);
	assert(vr->flags & VR_SHARED);

	region_insert(&dvmp->vm_regions_avl, vr);

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		if(map_ph_writept(dvmp, vr, ph) != OK) {
			panic("map_remap: map_ph_writept failed");
		}
		physr_incr_iter(&iter);
	}

	*r = startv;

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*========================================================================*
 *				map_get_phys				  *
 *========================================================================*/
int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
{
	struct vir_region *vr;
	struct phys_region *ph;
	physr_iter iter;

	if (!(vr = map_lookup(vmp, addr)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!(vr->flags & VR_SHARED))
		return EINVAL;

	physr_start_iter_least(vr->phys, &iter);
	ph = physr_get_iter(&iter);

	assert(ph);
	assert(ph->ph);
	if (r)
		*r = ph->ph->phys;

	return OK;
}
1812 *========================================================================*/
1813 int map_get_ref(struct vmproc
*vmp
, vir_bytes addr
, u8_t
*cnt
)
1815 struct vir_region
*vr
;
1816 struct phys_region
*ph
;
1819 if (!(vr
= map_lookup(vmp
, addr
)) ||
1820 (vr
->vaddr
!= addr
))
1823 if (!(vr
->flags
& VR_SHARED
))
1826 physr_start_iter_least(vr
->phys
, &iter
);
1827 ph
= physr_get_iter(&iter
);
1832 *cnt
= ph
->ph
->refcount
;
/*========================================================================*
 *				get_stats_info				  *
 *========================================================================*/
void get_stats_info(struct vm_stats_info *vsi)
{
	yielded_t *yb;

	vsi->vsi_cached = 0L;

	for(yb = lru_youngest; yb; yb = yb->older)
		vsi->vsi_cached++;
}
/*========================================================================*
 *				get_usage_info				  *
 *========================================================================*/
void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
{
	struct vir_region *vr;
	physr_iter iter;
	struct phys_region *ph;
	region_iter v_iter;

	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	memset(vui, 0, sizeof(*vui));

	while((vr = region_get_iter(&v_iter))) {
		physr_start_iter_least(vr->phys, &iter);
		while((ph = physr_get_iter(&iter))) {
			/* All present pages are counted towards the total. */
			vui->vui_total += VM_PAGE_SIZE;

			if (ph->ph->refcount > 1) {
				/* Any page with a refcount > 1 is common. */
				vui->vui_common += VM_PAGE_SIZE;

				/* Any common, non-COW page is shared. */
				if (vr->flags & VR_SHARED ||
					ph->ph->share_flag == PBSH_SMAP)
					vui->vui_shared += VM_PAGE_SIZE;
			}
			physr_incr_iter(&iter);
		}
		region_incr_iter(&v_iter);
	}
}
/*===========================================================================*
 *				get_region_info				     *
 *===========================================================================*/
int get_region_info(struct vmproc *vmp, struct vm_region_info *vri,
	int max, vir_bytes *nextp)
{
	struct vir_region *vr;
	vir_bytes next;
	int count;
	region_iter v_iter;

	next = *nextp;

	if (!max) return 0;

	region_start_iter(&vmp->vm_regions_avl, &v_iter, next, AVL_GREATER_EQUAL);
	if(!(vr = region_get_iter(&v_iter))) return 0;

	for(count = 0; (vr = region_get_iter(&v_iter)) && count < max; count++, vri++) {
		struct phys_region *ph1, *ph2;

		/* Report part of the region that's actually in use. */

		/* Get first and last phys_regions, if any */
		ph1 = physr_search_least(vr->phys);
		ph2 = physr_search_greatest(vr->phys);
		if(!ph1 || !ph2) { assert(!ph1 && !ph2); continue; }

		/* Report start+length of region starting from lowest use. */
		vri->vri_addr = vr->vaddr + ph1->offset;
		vri->vri_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
		vri->vri_length = ph2->offset + VM_PAGE_SIZE - ph1->offset;

		/* "AND" the provided protection with per-page protection. */
		if (!(vr->flags & VR_WRITABLE))
			vri->vri_prot &= ~PROT_WRITE;

		vri->vri_flags = (vr->flags & VR_SHARED) ? MAP_IPC_SHARED : 0;

		next = vr->vaddr + vr->length;
		region_incr_iter(&v_iter);
	}

	*nextp = next;
	return count;
}
/*========================================================================*
 *				regionprintstats			  *
 *========================================================================*/
void printregionstats(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *pr;
	physr_iter iter;
	region_iter v_iter;
	vir_bytes used = 0, weighted = 0;

	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		region_incr_iter(&v_iter);
		if(vr->flags & VR_DIRECT)
			continue;
		physr_start_iter_least(vr->phys, &iter);
		while((pr = physr_get_iter(&iter))) {
			physr_incr_iter(&iter);
			used += VM_PAGE_SIZE;
			weighted += VM_PAGE_SIZE / pr->ph->refcount;
		}
	}

	printf("%6lukB %6lukB\n", used/1024, weighted/1024);
}
/*===========================================================================*
 *				do_map_memory				     *
 *===========================================================================*/
static int do_map_memory(struct vmproc *vms, struct vmproc *vmd,
	struct vir_region *vrs, struct vir_region *vrd,
	vir_bytes offset_s, vir_bytes offset_d,
	vir_bytes length, int flag)
{
	struct phys_region *prs;
	struct phys_region *newphysr;
	struct phys_block *pb;
	physr_iter iter;
	u32_t pt_flag = PTF_PRESENT | PTF_USER;
	vir_bytes end;

	/* Search for the first phys region in the source process. */
	physr_start_iter(vrs->phys, &iter, offset_s, AVL_EQUAL);
	prs = physr_get_iter(&iter);
	if(!prs)
		panic("do_map_memory: no aligned phys region: %d", 0);

	/* flag: 0 -> read-only
	 *       1 -> writable
	 *      -1 -> share as COW, so read-only
	 */
	if(flag > 0)
		pt_flag |= PTF_WRITE;
	else
		pt_flag |= PTF_READ;

	/* Map phys blocks in the source process to the destination process. */
	end = offset_d + length;
	while((prs = physr_get_iter(&iter)) && offset_d < end) {
		/* If a SMAP share was requested but the phys block has already
		 * been shared as COW, copy the block for the source phys region
		 * first.
		 */
		pb = prs->ph;
		if(flag >= 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_COW) {
			if(!(prs = map_clone_ph_block(vms, vrs, prs, &iter)))
				return ENOMEM;
			pb = prs->ph;
		}

		/* Allocate a new phys region. */
		if(!(newphysr = pb_reference(pb, offset_d, vrd)))
			return ENOMEM;

		/* If a COW share was requested but the phys block has already
		 * been shared as SMAP, give up on COW and copy the block for
		 * the destination phys region now.
		 */
		if(flag < 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_SMAP) {
			if(!(newphysr = map_clone_ph_block(vmd, vrd,
				newphysr, NULL))) {
				return ENOMEM;
			}
		}
		else {
			/* See if this is a COW share or SMAP share. */
			if(flag < 0) {			/* COW share */
				pb->share_flag = PBSH_COW;
				/* Update the page table for the src process. */
				pt_writemap(vms, &vms->vm_pt, offset_s + vrs->vaddr,
					pb->phys, VM_PAGE_SIZE,
					pt_flag, WMF_OVERWRITE);
			}
			else {			/* SMAP share */
				pb->share_flag = PBSH_SMAP;
			}
			/* Update the page table for the destination process. */
			pt_writemap(vmd, &vmd->vm_pt, offset_d + vrd->vaddr,
				pb->phys, VM_PAGE_SIZE, pt_flag, WMF_OVERWRITE);
		}

		physr_incr_iter(&iter);
		offset_d += VM_PAGE_SIZE;
		offset_s += VM_PAGE_SIZE;
	}

	return OK;
}
/*===========================================================================*
 *				unmap_memory				     *
 *===========================================================================*/
int unmap_memory(endpoint_t sour, endpoint_t dest,
	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
{
	struct vmproc *vmd;
	struct vir_region *vrd;
	struct phys_region *pr;
	struct phys_block *pb;
	physr_iter iter;
	vir_bytes off, end;
	int p;

	/* Use information on the destination process to unmap. */
	if(vm_isokendpt(dest, &p) != OK)
		panic("unmap_memory: bad endpoint: %d", dest);
	vmd = &vmproc[p];

	vrd = map_lookup(vmd, virt_d);
	assert(vrd);

	/* Search for the first phys region in the destination process. */
	off = virt_d - vrd->vaddr;
	physr_start_iter(vrd->phys, &iter, off, AVL_EQUAL);
	pr = physr_get_iter(&iter);
	if(!pr)
		panic("unmap_memory: no aligned phys region: %d", 0);

	/* Copy the phys block now rather than doing COW. */
	end = off + length;
	while((pr = physr_get_iter(&iter)) && off < end) {
		pb = pr->ph;
		assert(pb->refcount > 1);
		assert(pb->share_flag == PBSH_SMAP);

		if(!(pr = map_clone_ph_block(vmd, vrd, pr, &iter)))
			return ENOMEM;
		physr_incr_iter(&iter);
		off += VM_PAGE_SIZE;
	}

	return OK;
}
/*===========================================================================*
 *				rm_phys_regions				     *
 *===========================================================================*/
static void rm_phys_regions(struct vir_region *region,
	vir_bytes begin, vir_bytes length)
{
/* Remove all phys regions between @begin and @begin+length.
 *
 * Don't update the page table, because we will update it at map_memory()
 * later.
 */
	struct phys_region *pr;
	physr_iter iter;

	physr_start_iter(region->phys, &iter, begin, AVL_GREATER_EQUAL);
	while((pr = physr_get_iter(&iter)) && pr->offset < begin + length) {
		pb_unreferenced(region, pr);
		physr_start_iter(region->phys, &iter, begin,
			AVL_GREATER_EQUAL);
		SLABFREE(pr);
	}
}
/*===========================================================================*
 *				map_memory				     *
 *===========================================================================*/
int map_memory(endpoint_t sour, endpoint_t dest,
	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
{
/* This is the entry point. This function will be called by handle_memory() when
 * VM receives a map-memory request.
 */
	struct vmproc *vms, *vmd;
	struct vir_region *vrs, *vrd;
	vir_bytes offset_s, offset_d;
	int p;
	int r;

	if(vm_isokendpt(sour, &p) != OK)
		panic("map_memory: bad endpoint: %d", sour);
	vms = &vmproc[p];
	if(vm_isokendpt(dest, &p) != OK)
		panic("map_memory: bad endpoint: %d", dest);
	vmd = &vmproc[p];

	vrs = map_lookup(vms, virt_s);
	assert(vrs);
	vrd = map_lookup(vmd, virt_d);
	assert(vrd);

	/* Linear address -> offset from start of vir region. */
	offset_s = virt_s - vrs->vaddr;
	offset_d = virt_d - vrd->vaddr;

	/* Make sure that the range in the source process has been mapped
	 * to physical memory.
	 */
	map_handle_memory(vms, vrs, offset_s, length, 0);

	/* Prepare work. */
	rm_phys_regions(vrd, offset_d, length);

	/* Map memory. */
	r = do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, flag);

	return r;
}
/*===========================================================================*
 *				get_clean_phys_region			     *
 *===========================================================================*/
static struct phys_region *
get_clean_phys_region(struct vmproc *vmp, vir_bytes vaddr, struct vir_region **ret_region)
{
	struct vir_region *region;
	vir_bytes regionoffset, mapaddr;
	struct phys_region *ph;

	mapaddr = vaddr;

	if(!(region = map_lookup(vmp, mapaddr))) {
		printf("VM: get_clean_phys_region: 0x%lx not found\n", vaddr);
		return NULL;
	}

	if(!(region->flags & VR_ANON)) {
		printf("VM: get_clean_phys_region: non-anon 0x%lx\n", vaddr);
		return NULL;
	}

	assert(mapaddr >= region->vaddr);
	assert(mapaddr < region->vaddr + region->length);

	regionoffset = mapaddr-region->vaddr;

	/* For now, only support the yielding of blocks that are
	 * exactly a mapped phys_region. Go get that phys_region.
	 * (This can be improved without changing the interface.)
	 */
	if(!(ph = physr_search(region->phys, regionoffset,
		AVL_EQUAL))) {
		printf("VM: get_clean_phys_region: exact block not found\n");
		return NULL;
	}

	/* Make sure this is what we asked for. */
	assert(ph->offset == regionoffset);

	/* If it's mapped more than once, make a copy. */
	assert(ph->ph->refcount > 0);
	if(ph->ph->refcount > 1) {
		if(!(ph = map_clone_ph_block(vmp, region,
			ph, NULL))) {
			printf("VM: get_clean_phys_region: ph copy failed\n");
			return NULL;
		}
	}

	assert(ph->ph->refcount == 1);

	*ret_region = region;

	return ph;
}
static int getblock(struct vmproc *vmp, u64_t id, vir_bytes vaddr)
{
	yielded_t *yb;
	struct phys_region *ph;
	struct vir_region *region;
	yielded_avl *avl;
	block_id_t blockid;

	/* Try to get the yielded block */
	blockid.owner = vmp->vm_endpoint;
	blockid.id = id;
	avl = get_yielded_avl(blockid);
	if(!(yb = yielded_search(avl, blockid, AVL_EQUAL))) {
		return ESRCH;
	}

	/* Get the intended phys region, make sure refcount is 1. */
	if(!(ph = get_clean_phys_region(vmp, vaddr, &region))) {
		printf("VM: getblock: not found for %d\n", vmp->vm_endpoint);
		return EINVAL;
	}

	assert(ph->ph->refcount == 1);

	/* Free the block that is currently there. */
	free_mem(ABS2CLICK(ph->ph->phys), 1);

	/* Set the phys block to new addr and update pagetable. */
	USE(ph->ph, ph->ph->phys = yb->addr;);
	if(map_ph_writept(vmp, region, ph) != OK) {
		/* Presumably it was mapped, so there is no reason
		 * updating should fail.
		 */
		panic("do_get_block: couldn't write pt");
	}

	/* Forget about the yielded block and free the struct. */
	freeyieldednode(yb, 0);

	return OK;
}
static int yieldblock(struct vmproc *vmp, u64_t id,
	vir_bytes vaddr, yielded_t **retyb)
{
	yielded_t *newyb;
	vir_bytes mem_clicks, clicks;
	struct vir_region *region;
	struct phys_region *ph;
	yielded_avl *avl;
	block_id_t blockid;

	/* Makes no sense if yielded block ID already exists, and
	 * is likely a serious bug in the caller.
	 */
	blockid.id = id;
	blockid.owner = vmp->vm_endpoint;
	avl = get_yielded_avl(blockid);
	if(yielded_search(avl, blockid, AVL_EQUAL)) {
		return EINVAL;
	}

	if(vaddr % VM_PAGE_SIZE) return EFAULT;

	if(!(ph = get_clean_phys_region(vmp, vaddr, &region))) {
		printf("VM: do_yield_block: not found for %d\n",
			vmp->vm_endpoint);
		return EINVAL;
	}

	/* Make a new block to record the yielding in. */
	if(!SLABALLOC(newyb)) {
		return ENOMEM;
	}

	assert(!(ph->ph->phys % VM_PAGE_SIZE));

	clicks = VM_PAGE_SIZE/CLICK_SIZE;	/* blocks are page-sized here */
	if((mem_clicks = alloc_mem(clicks, PAF_CLEAR)) == NO_MEM) {
		SLABFREE(newyb);
		return ENOMEM;
	}

	/* Update yielded block info. */
	USE(newyb,
		newyb->id = blockid;
		newyb->addr = ph->ph->phys;
		newyb->younger = NULL;);

	/* Set new phys block to new addr and update pagetable. */
	USE(ph->ph,
		ph->ph->phys = CLICK2ABS(mem_clicks););
	if(map_ph_writept(vmp, region, ph) != OK) {
		/* Presumably it was mapped, so there is no reason
		 * updating should fail.
		 */
		panic("yield_block: couldn't write pt");
	}

	/* Remember yielded block. */
	yielded_insert(avl, newyb);
	vmp->vm_yielded++;

	/* Add to LRU list too. It's the youngest block. */
	LRUCHECK;

	if(lru_youngest) {
		USE(lru_youngest,
			lru_youngest->younger = newyb;);
	} else {
		lru_oldest = newyb;
	}

	USE(newyb,
		newyb->older = lru_youngest;);

	lru_youngest = newyb;

	LRUCHECK;

	*retyb = newyb;

	return OK;
}
/*===========================================================================*
 *				do_forgetblocks				     *
 *===========================================================================*/
int do_forgetblocks(message *m)
{
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yield_block: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	free_yielded_proc(vmp);

	return OK;
}
/*===========================================================================*
 *				do_forgetblock				     *
 *===========================================================================*/
int do_forgetblock(message *m)
{
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;
	yielded_t *yb;
	u64_t id;
	block_id_t blockid;
	yielded_avl *avl;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yield_block: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	id = make64(m->VMFB_IDLO, m->VMFB_IDHI);

	blockid.id = id;
	blockid.owner = vmp->vm_endpoint;
	avl = get_yielded_avl(blockid);
	if((yb = yielded_search(avl, blockid, AVL_EQUAL))) {
		freeyieldednode(yb, 1);
	}

	return OK;
}
/*===========================================================================*
 *				do_yieldblockgetblock			     *
 *===========================================================================*/
int do_yieldblockgetblock(message *m)
{
	u64_t yieldid, getid;
	int n, r = ESRCH;
	endpoint_t caller = m->m_source;
	struct vmproc *vmp;
	yielded_t *yb = NULL;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yieldblockgetblock: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	if(m->VMYBGB_LEN != VM_PAGE_SIZE) {
		static int printed = 0;
		if(!printed) {
			printed = 1;
			printf("vm: secondary cache for non-page-sized blocks temporarily disabled\n");
		}
		return ENOSYS;
	}

	yieldid = make64(m->VMYBGB_YIELDIDLO, m->VMYBGB_YIELDIDHI);
	getid = make64(m->VMYBGB_GETIDLO, m->VMYBGB_GETIDHI);

	if(cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to yield. */
		yieldblock(vmp, yieldid, (vir_bytes) m->VMYBGB_VADDR, &yb);
	}

	if(cmp64(getid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to get. */
		r = getblock(vmp, getid, (vir_bytes) m->VMYBGB_VADDR);
	}

	return r;
}
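/*
 * Illustrative sketch (not part of the original source): a client such
 * as a file server using VM as a second-level block cache can yield
 * one page and reclaim another in a single request (ids hypothetical):
 *
 *	m.VMYBGB_LEN = VM_PAGE_SIZE;
 *	m.VMYBGB_VADDR = (char *) buf;
 *	m.VMYBGB_YIELDIDLO = ex64lo(oldid); m.VMYBGB_YIELDIDHI = ex64hi(oldid);
 *	m.VMYBGB_GETIDLO = ex64lo(getid);   m.VMYBGB_GETIDHI = ex64hi(getid);
 */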
void map_setparent(struct vmproc *vmp)
{
	region_iter iter;
	struct vir_region *vr;

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		USE(vr, vr->parent = vmp;);
		region_incr_iter(&iter);
	}
}