#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>
#include <minix/hash.h>

#include <string.h>
#include <assert.h>
#include <sys/param.h>

#include "vm.h"
#include "proto.h"
#include "util.h"
#include "glo.h"
#include "region.h"
#include "sanitycheck.h"
#include "memlist.h"
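/* This file manages the per-process virtual address space: each process
 * has an AVL tree of vir_regions, each vir_region an AVL tree of
 * phys_regions, and phys_regions reference (possibly shared) phys_blocks.
 * Yielded blocks -- memory a process has given back to VM but may ask
 * for again -- are cached on a doubly-linked LRU list anchored at
 * lru_youngest/lru_oldest below, and reclaimed from the oldest end when
 * memory is needed.
 */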
static yielded_t *lru_youngest = NULL, *lru_oldest = NULL;
/* Should a physblock be mapped writable? */
#define WRITABLE(r, pb) \
	(((r)->flags & VR_WRITABLE) && \
	  (((r)->flags & (VR_DIRECT | VR_SHARED)) || \
	   (pb)->refcount == 1))
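/* In other words: a page in a writable region is mapped read/write only
 * when it is direct/shared memory or the sole reference to its
 * phys_block. A copy-on-write page with refcount > 1 is deliberately
 * mapped read-only, so the first write faults into map_pf() and gets a
 * private copy via map_clone_ph_block().
 */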
static int map_new_physblock(struct vmproc *vmp, struct vir_region
	*region, vir_bytes offset, vir_bytes length, phys_bytes what, u32_t
	allocflags, int written);

static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr);

static phys_bytes freeyieldednode(yielded_t *node, int freemem);

static struct vir_region *map_copy_region(struct vmproc *vmp, struct
	vir_region *vr);

static struct phys_region *map_clone_ph_block(struct vmproc *vmp,
	struct vir_region *region, struct phys_region *ph, physr_iter *iter);

static void lrucheck(void);
/* hash table of yielded blocks */
#define YIELD_HASHSIZE 65536
static yielded_avl vm_yielded_blocks[YIELD_HASHSIZE];

static int avl_inited = 0;
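/* Yielded blocks are looked up by (owner endpoint, 64-bit block id).
 * Hashing that pair into one of 65536 small AVL trees keeps each
 * individual tree shallow, so lookup, insert and remove stay cheap even
 * with many outstanding yielded blocks.
 */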
void map_region_init(void)
{
	int h;

	assert(!avl_inited);
	for(h = 0; h < YIELD_HASHSIZE; h++)
		yielded_init(&vm_yielded_blocks[h]);
	avl_inited = 1;
}
static yielded_avl *get_yielded_avl(block_id_t id)
{
	u32_t h;

	assert(avl_inited);

	hash_i_64(id.owner, id.id, h);
	h = h % YIELD_HASHSIZE;

	assert(h < YIELD_HASHSIZE);

	return &vm_yielded_blocks[h];
}
static char *map_name(struct vir_region *vr)
{
	static char name[100];
	char *typename, *tag;
	int type = vr->flags & (VR_ANON|VR_DIRECT);
	switch(type) {
		case VR_ANON:
			typename = "anonymous";
			break;
		case VR_DIRECT:
			typename = "direct";
			break;
		default:
			panic("unknown mapping type: %d", type);
	}

	switch(vr->tag) {
		default:
			tag = "unknown tag value";
			break;
	}

	sprintf(name, "%s, %s", typename, tag);

	return name;
}
void map_printregion(struct vmproc *vmp, struct vir_region *vr)
{
	physr_iter iter;
	struct phys_region *ph;
	printf("map_printmap: map_name: %s\n", map_name(vr));
	printf("\t%lx (len 0x%lx, %lukB), %p\n",
		vr->vaddr, vr->length, vr->length/1024, map_name(vr));
	printf("\t\tphysblocks:\n");
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		printf("\t\t@ %lx (refs %d): phys 0x%lx\n",
			(vr->vaddr + ph->offset),
			ph->ph->refcount, ph->ph->phys);
		physr_incr_iter(&iter);
	}
}
/*===========================================================================*
 *				map_printmap				     *
 *===========================================================================*/
void map_printmap(vmp)
struct vmproc *vmp;
{
	struct vir_region *vr;
	region_iter iter;

	printf("memory regions in process %d:\n", vmp->vm_endpoint);

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		map_printregion(vmp, vr);
		region_incr_iter(&iter);
	}
}
static struct vir_region *getnextvr(struct vir_region *vr)
{
	struct vir_region *nextvr;
	region_iter v_iter;

	SLABSANE(vr);
	region_start_iter(&vr->parent->vm_regions_avl, &v_iter, vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter));
	assert(region_get_iter(&v_iter) == vr);
	region_incr_iter(&v_iter);
	nextvr = region_get_iter(&v_iter);
	if(!nextvr) return NULL;
	SLABSANE(nextvr);
	assert(vr->parent == nextvr->parent);
	assert(vr->vaddr < nextvr->vaddr);
	assert(vr->vaddr + vr->length <= nextvr->vaddr);
	return nextvr;
}
/*===========================================================================*
 *				map_sanitycheck_pt			     *
 *===========================================================================*/
static int map_sanitycheck_pt(struct vmproc *vmp,
	struct vir_region *vr, struct phys_region *pr)
{
	struct phys_block *pb = pr->ph;
	int rw;
	int r;

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	r = pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
	  pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw, WMF_VERIFY);

	if(r != OK) {
		printf("proc %d phys_region 0x%lx sanity check failed\n",
			vmp->vm_endpoint, pr->offset);
		map_printregion(vmp, vr);
	}

	return r;
}
/*===========================================================================*
 *				map_sanitycheck				     *
 *===========================================================================*/
void map_sanitycheck(char *file, int line)
{
	struct vmproc *vmp;

	lrucheck();

/* Macro for looping over all physical blocks of all regions of
 * all processes.
 */
#define ALLREGIONS(regioncode, physcode) \
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) { \
		region_iter v_iter; \
		struct vir_region *vr; \
		if(!(vmp->vm_flags & VMF_INUSE)) \
			continue; \
		region_start_iter_least(&vmp->vm_regions_avl, &v_iter); \
		while((vr = region_get_iter(&v_iter))) { \
			physr_iter iter; \
			struct phys_region *pr; \
			regioncode; \
			physr_start_iter_least(vr->phys, &iter); \
			while((pr = physr_get_iter(&iter))) { \
				physcode; \
				physr_incr_iter(&iter); \
			} \
			region_incr_iter(&v_iter); \
		} \
	}

#define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
	/* Basic pointers check. */
	ALLREGIONS(MYSLABSANE(vr),MYSLABSANE(pr); MYSLABSANE(pr->ph);MYSLABSANE(pr->parent));
	ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr->parent == vr););

	/* Do counting for consistency check. */
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
		if(pr->ph->seencount == 1) {
			if(!(pr->parent->flags & VR_DIRECT)) {
				MYASSERT(usedpages_add(pr->ph->phys,
					VM_PAGE_SIZE) == OK);
			}
		}
	);

	/* Do consistency check. */
	ALLREGIONS({ struct vir_region *nextvr = getnextvr(vr);
		if(nextvr) {
			MYASSERT(vr->vaddr < nextvr->vaddr);
			MYASSERT(vr->vaddr + vr->length <= nextvr->vaddr);
		}
		}
		MYASSERT(!(vr->vaddr % VM_PAGE_SIZE));,
		if(pr->ph->refcount != pr->ph->seencount) {
			map_printmap(vmp);
			printf("ph in vr %p: 0x%lx refcount %u "
				"but seencount %u\n",
				vr, pr->offset,
				pr->ph->refcount, pr->ph->seencount);
		}
		{
			int n_others = 0;
			struct phys_region *others;
			if(pr->ph->refcount > 0) {
				MYASSERT(pr->ph->firstregion);
				if(pr->ph->refcount == 1) {
					MYASSERT(pr->ph->firstregion == pr);
				}
			} else {
				MYASSERT(!pr->ph->firstregion);
			}
			for(others = pr->ph->firstregion; others;
				others = others->next_ph_list) {
				MYSLABSANE(others);
				MYASSERT(others->ph == pr->ph);
				n_others++;
			}
			MYASSERT(pr->ph->refcount == n_others);
		}
		MYASSERT(pr->ph->refcount == pr->ph->seencount);
		MYASSERT(!(pr->offset % VM_PAGE_SIZE)););
	ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp, vr, pr) == OK));
}
#define LRUCHECK lrucheck()

static void lrucheck(void)
{
	yielded_t *list;

	/* list is empty and ok if both ends point to null. */
	if(!lru_youngest && !lru_oldest)
		return;

	/* if not, both should point to something. */
	SLABSANE(lru_youngest);
	SLABSANE(lru_oldest);

	assert(!lru_youngest->younger);
	assert(!lru_oldest->older);

	for(list = lru_youngest; list; list = list->older) {
		SLABSANE(list);
		if(list->younger) {
			SLABSANE(list->younger);
			assert(list->younger->older == list);
		} else assert(list == lru_youngest);
		if(list->older) {
			SLABSANE(list->older);
			assert(list->older->younger == list);
		} else assert(list == lru_oldest);
	}
}
void blockstats(void)
{
	yielded_t *list;
	int blocks = 0;
	phys_bytes mem = 0;
	clock_t ticks;
	int s;

	s = getuptime(&ticks);
	assert(s == OK);

	LRUCHECK;

	for(list = lru_youngest; list; list = list->older) {
		blocks++;
		mem += VM_PAGE_SIZE * list->pages;
	}

	printf("%d blocks, %lukB; ", blocks, mem/1024);
}
/*=========================================================================*
 *				map_ph_writept				   *
 *=========================================================================*/
static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr)
{
	int rw;
	struct phys_block *pb = pr->ph;

	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(pr->offset % VM_PAGE_SIZE));
	assert(pb->refcount > 0);

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	if(pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
	  pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw,
	  WMF_OVERWRITE) != OK) {
		printf("VM: map_writept: pt_writemap failed\n");
		return ENOMEM;
	}

	USE(pr, pr->written = 1;);

	return OK;
}
#define SLOT_FAIL ((vir_bytes) -1)

/*===========================================================================*
 *				region_find_slot_range			     *
 *===========================================================================*/
static vir_bytes region_find_slot_range(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	struct vir_region *lastregion;
	vir_bytes startv = 0;
	int foundflag = 0;
	region_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Length must be reasonable. */
	assert(length > 0);

	/* Special case: allow caller to set maxv to 0 meaning 'I want
	 * it to be mapped in right here.'
	 */
	if(maxv == 0) {
		maxv = minv + length;

		/* Sanity check. */
		if(maxv <= minv) {
			printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
				minv, length);
			return SLOT_FAIL;
		}
	}

	/* Basic input sanity checks. */
	assert(!(length % VM_PAGE_SIZE));
	if(minv >= maxv) {
		printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
			minv, maxv, length);
	}
	assert(minv < maxv);

	if(minv + length > maxv)
		return SLOT_FAIL;

#define FREEVRANGE_TRY(rangestart, rangeend) { \
	vir_bytes frstart = (rangestart), frend = (rangeend); \
	frstart = MAX(frstart, minv); \
	frend = MIN(frend, maxv); \
	if(frend > frstart && (frend - frstart) >= length) { \
		startv = frend-length; \
		foundflag = 1; \
	} }

#define FREEVRANGE(start, end) { \
	assert(!foundflag); \
	FREEVRANGE_TRY(((start)+VM_PAGE_SIZE), ((end)-VM_PAGE_SIZE)); \
	if(!foundflag) { \
		FREEVRANGE_TRY((start), (end)); \
	} }

	/* find region after maxv. */
	region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_GREATER_EQUAL);
	lastregion = region_get_iter(&iter);

	if(!lastregion) {
		/* This is the free virtual address space after the last region. */
		region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_LESS);
		lastregion = region_get_iter(&iter);
		FREEVRANGE(lastregion ?
			lastregion->vaddr+lastregion->length : 0, VM_DATATOP);
	}

	if(!foundflag) {
		struct vir_region *vr;
		while((vr = region_get_iter(&iter)) && !foundflag) {
			struct vir_region *nextvr;
			region_decr_iter(&iter);
			nextvr = region_get_iter(&iter);
			FREEVRANGE(nextvr ? nextvr->vaddr+nextvr->length : 0,
				vr->vaddr);
		}
	}

	if(!foundflag) {
		printf("VM: region_find_slot: no 0x%lx bytes found for %d between 0x%lx and 0x%lx\n",
			length, vmp->vm_endpoint, minv, maxv);
		return SLOT_FAIL;
	}

	/* However we got it, startv must be in the requested range. */
	assert(startv >= minv);
	assert(startv < maxv);
	assert(startv + length <= maxv);

	/* remember this position as a hint for next time. */
	vmp->vm_region_top = startv + length;

	return startv;
}
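/* Note that FREEVRANGE first tries the candidate range with a one-page
 * gap on either side (so consecutive regions do not touch, which helps
 * catch overruns), and only falls back to the exact range if that fails;
 * and that startv = frend - length means slots are handed out from the
 * top of each free range downwards.
 */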
/*===========================================================================*
 *				region_find_slot			     *
 *===========================================================================*/
static vir_bytes region_find_slot(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	vir_bytes v, hint = vmp->vm_region_top;

	/* use the top of the last inserted region as a minv hint if
	 * possible. remember that a zero maxv is a special case.
	 */
	if(maxv && hint < maxv && hint >= minv) {
		v = region_find_slot_range(vmp, minv, hint, length);

		if(v != SLOT_FAIL)
			return v;
	}

	return region_find_slot_range(vmp, minv, maxv, length);
}
struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv,
	vir_bytes length, int flags)
{
	physr_avl *phavl;
	struct vir_region *newregion;

	if(!(SLABALLOC(newregion))) {
		printf("vm: region_new: could not allocate\n");
		return NULL;
	}

	/* Fill in node details. */
USE(newregion,
	newregion->vaddr = startv;
	newregion->length = length;
	newregion->flags = flags;
	newregion->tag = VRT_NONE;
	newregion->lower = newregion->higher = NULL;
	newregion->parent = vmp;);

	SLABALLOC(phavl);
	if(!phavl) {
		printf("VM: region_new: allocating phys avl failed\n");
		SLABFREE(newregion);
		return NULL;
	}
	USE(newregion, newregion->phys = phavl;);
	physr_init(newregion->phys);

	return newregion;
}
/*===========================================================================*
 *				map_page_region				     *
 *===========================================================================*/
struct vir_region *map_page_region(vmp, minv, maxv, length,
	what, flags, mapflags)
struct vmproc *vmp;
vir_bytes minv;
vir_bytes maxv;
vir_bytes length;
phys_bytes what;
u32_t flags;
int mapflags;
{
	struct vir_region *newregion;
	vir_bytes startv;

	assert(!(length % VM_PAGE_SIZE));

	SANITYCHECK(SCL_FUNCTIONS);

	if((flags & VR_CONTIG) && !(mapflags & MF_PREALLOC)) {
		printf("map_page_region: can't make contiguous allocation without preallocating\n");
		return NULL;
	}

	startv = region_find_slot(vmp, minv, maxv, length);
	if (startv == SLOT_FAIL)
		return NULL;

	/* Now we want a new region. */
	if(!(newregion = region_new(vmp, startv, length, flags))) {
		printf("VM: map_page_region: allocating region failed\n");
		return NULL;
	}

	/* If we know what we're going to map to, map it right away. */
	if(what != MAP_NONE) {
		assert(!(what % VM_PAGE_SIZE));
		assert(!(startv % VM_PAGE_SIZE));
		assert(!(mapflags & MF_PREALLOC));
		if(map_new_physblock(vmp, newregion, 0, length,
			what, PAF_CLEAR, 0) != OK) {
			printf("VM: map_new_physblock failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	if((flags & VR_ANON) && (mapflags & MF_PREALLOC)) {
		if(map_handle_memory(vmp, newregion, 0, length, 1) != OK) {
			printf("VM: map_page_region: prealloc failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	/* Pre-allocations should be uninitialized, but after that it's a
	 * different story.
	 */
	USE(newregion, newregion->flags &= ~VR_UNINITIALIZED;);

	/* Link it. */
	region_insert(&vmp->vm_regions_avl, newregion);

	assert(startv == newregion->vaddr);
	{
		struct vir_region *nextvr;
		if((nextvr = getnextvr(newregion))) {
			assert(newregion->vaddr < nextvr->vaddr);
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return newregion;
}
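/* map_page_region is how new address space appears: find a free slot,
 * create the vir_region, then optionally attach physical memory --
 * immediately when 'what' names a physical address, or page by page via
 * map_handle_memory() when MF_PREALLOC asks for fully populated anonymous
 * memory. With neither, pages are only filled in on first page fault.
 */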
static struct phys_region *reset_physr_iter(struct vir_region *region,
	physr_iter *iter, vir_bytes offset)
{
	struct phys_region *ph;

	physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
	ph = physr_get_iter(iter);
	assert(ph);
	assert(ph->offset == offset);

	return ph;
}
/*===========================================================================*
 *				map_subfree				     *
 *===========================================================================*/
static int map_subfree(struct vir_region *region,
	vir_bytes start, vir_bytes len)
{
	struct phys_region *pr;
	physr_iter iter;
	vir_bytes end = start+len;
	int full = 0;

	SLABSANE(region->phys);
	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		struct phys_region *others;
		struct phys_block *pb;

		pb = pr->ph;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
		}
		physr_incr_iter(&iter);
	}

	if(start == 0 && len == region->length)
		full = 1;

	physr_init_iter(&iter);
	physr_start_iter(region->phys, &iter, start, AVL_GREATER_EQUAL);
	while((pr = physr_get_iter(&iter))) {
		physr_incr_iter(&iter);
		if(pr->offset >= end)
			break;
		pb_unreferenced(region, pr, !full);
		if(!full) {
			physr_start_iter(region->phys, &iter,
				pr->offset, AVL_GREATER_EQUAL);
		}
		SLABFREE(pr);
	}

	if(full)
		physr_init(region->phys);

	return OK;
}
/*===========================================================================*
 *				map_free				     *
 *===========================================================================*/
static int map_free(struct vir_region *region)
{
	int r;

	if((r=map_subfree(region, 0, region->length)) != OK) {
		printf("%d\n", __LINE__);
		return r;
	}

	USE(region,
		SLABFREE(region->phys););
	SLABFREE(region);

	return OK;
}
/*===========================================================================*
 *				yielded_block_cmp			     *
 *===========================================================================*/
int yielded_block_cmp(struct block_id *id1, struct block_id *id2)
{
	if(id1->owner < id2->owner)
		return -1;
	if(id1->owner > id2->owner)
		return 1;
	return cmp64(id1->id, id2->id);
}
/*===========================================================================*
 *				free_yielded_proc			     *
 *===========================================================================*/
static vir_bytes free_yielded_proc(struct vmproc *vmp)
{
	vir_bytes total = 0;
	int h;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Free associated regions. */
	for(h = 0; h < YIELD_HASHSIZE && vmp->vm_yielded > 0; h++) {
		yielded_t *yb;
		yielded_iter iter;
		yielded_avl *avl = &vm_yielded_blocks[h];
		yielded_start_iter_least(avl, &iter);
		while((yb = yielded_get_iter(&iter))) {
			yielded_t *next_yb;
			SLABSANE(yb);
			yielded_incr_iter(&iter);
			if(yb->id.owner != vmp->vm_endpoint)
				continue;
			next_yb = yielded_get_iter(&iter);
			total += freeyieldednode(yb, 1);
			/* the above removal invalidated our iter; restart it
			 * for the node we want to start at.
			 */
			if(!next_yb) break;
			yielded_start_iter(avl, &iter, next_yb->id, AVL_EQUAL);
			assert(yielded_get_iter(&iter) == next_yb);
		}
	}

	return total;
}
static phys_bytes freeyieldednode(yielded_t *node, int freemem)
{
	yielded_t *older, *younger, *removed;
	yielded_avl *avl;
	phys_bytes len;
	int p;

	SLABSANE(node);

	LRUCHECK;

	/* Update LRU. */

	younger = node->younger;
	older = node->older;

	if(younger) {
		SLABSANE(younger);
		assert(younger->older == node);
		USE(younger, younger->older = node->older;);
	} else {
		assert(node == lru_youngest);
		lru_youngest = node->older;
	}

	if(older) {
		SLABSANE(older);
		assert(older->younger == node);
		USE(older, older->younger = node->younger;);
	} else {
		assert(node == lru_oldest);
		lru_oldest = node->younger;
	}

	LRUCHECK;

	/* Update AVL. */

	if(vm_isokendpt(node->id.owner, &p) != OK)
		panic("out of date owner of yielded block %d", node->id.owner);
	avl = get_yielded_avl(node->id);
	removed = yielded_remove(avl, node->id);
	assert(removed == node);
	assert(vmproc[p].vm_yielded > 0);
	vmproc[p].vm_yielded--;

	/* Free associated memory if requested. */

	if(freemem) {
		free_mem(ABS2CLICK(node->physaddr), node->pages);
	}

	/* Free node. */
	len = VM_PAGE_SIZE * node->pages;
	SLABFREE(node);

	return len;
}
/*========================================================================*
 *				free_yielded				  *
 *========================================================================*/
vir_bytes free_yielded(vir_bytes max_bytes)
{
/* PRIVATE yielded_t *lru_youngest = NULL, *lru_oldest = NULL; */
	vir_bytes freed = 0;
	int blocks = 0;

	while(freed < max_bytes && lru_oldest) {
		SLABSANE(lru_oldest);
		freed += freeyieldednode(lru_oldest, 1);
		blocks++;
	}

	return freed;
}
/*========================================================================*
 *				map_free_proc				  *
 *========================================================================*/
int map_free_proc(vmp)
struct vmproc *vmp;
{
	struct vir_region *r;

	while((r = region_search_root(&vmp->vm_regions_avl))) {
		SANITYCHECK(SCL_DETAIL);
		region_remove(&vmp->vm_regions_avl, r->vaddr); /* For sanity checks. */
		map_free(r);
		SANITYCHECK(SCL_DETAIL);
	}

	region_init(&vmp->vm_regions_avl);

	/* Free associated yielded blocks. */
	free_yielded_proc(vmp);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*===========================================================================*
 *				map_lookup				     *
 *===========================================================================*/
struct vir_region *map_lookup(vmp, offset, physr)
struct vmproc *vmp;
vir_bytes offset;
struct phys_region **physr;
{
	struct vir_region *r;

	SANITYCHECK(SCL_FUNCTIONS);

	if(!region_search_root(&vmp->vm_regions_avl))
		panic("process has no regions: %d", vmp->vm_endpoint);

	if((r = region_search(&vmp->vm_regions_avl, offset, AVL_LESS_EQUAL))) {
		vir_bytes ph;
		if(offset >= r->vaddr && offset < r->vaddr + r->length) {
			ph = offset - r->vaddr;
			if(physr) {
				*physr = physr_search(r->phys, ph, AVL_EQUAL);
				assert((*physr)->offset == ph);
			}
			return r;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return NULL;
}
static u32_t vrallocflags(u32_t flags)
{
	u32_t allocflags = 0;

	if(flags & VR_PHYS64K)
		allocflags |= PAF_ALIGN64K;
	if(flags & VR_LOWER16MB)
		allocflags |= PAF_LOWER16MB;
	if(flags & VR_LOWER1MB)
		allocflags |= PAF_LOWER1MB;
	if(flags & VR_CONTIG)
		allocflags |= PAF_CONTIG;

	return allocflags;
}
/*===========================================================================*
 *				map_new_physblock			     *
 *===========================================================================*/
static int map_new_physblock(vmp, region, start_offset, length,
	what_mem, allocflags, written)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes start_offset;
vir_bytes length;
phys_bytes what_mem;
u32_t allocflags;
int written;
{
	struct memlist *memlist, *ml;
	int r = OK;
	vir_bytes mapped = 0;
	vir_bytes offset = start_offset;

	SANITYCHECK(SCL_FUNCTIONS);

	assert(!(length % VM_PAGE_SIZE));

	if((region->flags & VR_CONTIG) &&
		(start_offset > 0 || length < region->length)) {
		printf("VM: region length 0x%lx, offset 0x%lx length 0x%lx\n",
			region->length, start_offset, length);
		printf("VM: map_new_physblock: non-full contig allocation requested\n");
		return EFAULT;
	}

	/* Memory for new physical block. */
	allocflags |= vrallocflags(region->flags);

	if(allocflags & PAF_CONTIG) {
		assert(what_mem == MAP_NONE);
		if((what_mem = alloc_mem(length/VM_PAGE_SIZE, allocflags)) == NO_MEM) {
			return ENOMEM;
		}
		what_mem = CLICK2ABS(what_mem);
		allocflags &= ~PAF_CONTIG;
		assert(what_mem != MAP_NONE);
	}

	if(!(memlist = alloc_mem_in_list(length, allocflags, what_mem))) {
		printf("map_new_physblock: couldn't allocate\n");
		return ENOMEM;
	}

	for(ml = memlist; ml; ml = ml->next) {
		struct phys_region *newphysr = NULL;
		struct phys_block *newpb = NULL;

		/* Allocate things necessary for this chunk of memory. */
		if(!(newpb = pb_new(ml->phys)) ||
		   !(newphysr = pb_reference(newpb, offset, region))) {
			printf("map_new_physblock: no memory for the ph slabs\n");
			if(newpb) SLABFREE(newpb);
			r = ENOMEM;
			break;
		}

		/* Update pagetable. */
		if(map_ph_writept(vmp, region, newphysr) != OK) {
			printf("map_new_physblock: map_ph_writept failed\n");
			r = ENOMEM;
			break;
		}

		offset += VM_PAGE_SIZE;
		mapped += VM_PAGE_SIZE;
	}

	if(r != OK) {
		offset = start_offset;
		/* Things did not go well. Undo everything. */
		for(ml = memlist; ml; ml = ml->next) {
			struct phys_region *physr;
			if((physr = physr_search(region->phys, offset,
				AVL_EQUAL))) {
				assert(physr->ph->refcount == 1);
				pb_unreferenced(region, physr, 1);
				SLABFREE(physr);
			}
			offset += VM_PAGE_SIZE;
		}
	} else assert(mapped == length);

	/* Always clean up the memlist itself, even if everything
	 * worked we're not using the memlist nodes any more. And
	 * the memory they reference is either freed above or in use.
	 */
	free_mem_list(memlist, 0);

	SANITYCHECK(SCL_FUNCTIONS);

	return r;
}
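/* The undo loop above relies on every successfully mapped page having a
 * phys_region with refcount 1 at its offset: walking the memlist again
 * from start_offset and unreferencing whatever is found rolls back
 * exactly the pages this call created, while free_mem_list(memlist, 0)
 * releases only the list nodes, not the memory they point at.
 */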
/*===========================================================================*
 *				map_clone_ph_block			     *
 *===========================================================================*/
static struct phys_region *map_clone_ph_block(vmp, region, ph, iter)
struct vmproc *vmp;
struct vir_region *region;
struct phys_region *ph;
physr_iter *iter;
{
	vir_bytes offset;
	u32_t allocflags;
	phys_bytes physaddr;
	struct phys_region *newpr;
	int region_has_single_block;
	int written;

	written = ph->written;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Warning: this function will free the passed
	 * phys_region *ph and replace it (in the same offset)
	 * with another! So both the pointer to it
	 * and any iterators over the phys_regions in the vir_region
	 * will be invalid on successful return. (Iterators over
	 * the vir_region could be invalid on unsuccessful return too.)
	 */

	/* This is only to be done if there is more than one copy. */
	assert(ph->ph->refcount > 1);

	/* This function takes a physical block, copies its contents
	 * into newly allocated memory, and replaces the single physical
	 * block by one or more physical blocks with refcount 1 with the
	 * same contents as the original. In other words, a fragmentable
	 * version of map_copy_ph_block().
	 */

	/* Remember where and how much. */
	offset = ph->offset;
	physaddr = ph->ph->phys;

	/* Now unlink the original physical block so we can replace
	 * it with new ones.
	 */

	SLABSANE(ph);
	SLABSANE(ph->ph);
	assert(ph->ph->refcount > 1);
	pb_unreferenced(region, ph, 1);
	assert(ph->ph->refcount >= 1);
	SLABFREE(ph);

	SANITYCHECK(SCL_DETAIL);

	/* Put new free memory in. */
	allocflags = vrallocflags(region->flags);
	region_has_single_block = (offset == 0 && region->length == VM_PAGE_SIZE);
	assert(region_has_single_block || !(allocflags & PAF_CONTIG));
	assert(!(allocflags & PAF_CLEAR));

	if(map_new_physblock(vmp, region, offset, VM_PAGE_SIZE,
		MAP_NONE, allocflags, written) != OK) {
		/* XXX original range now gone. */
		printf("VM: map_clone_ph_block: map_new_physblock failed.\n");
		return NULL;
	}

	/* Copy the block to the new memory.
	 * Can only fail if map_new_physblock didn't do what we asked.
	 */
	if(copy_abs2region(physaddr, region, offset, VM_PAGE_SIZE) != OK)
		panic("copy_abs2region failed, no good reason for that");

	newpr = physr_search(region->phys, offset, AVL_EQUAL);
	assert(newpr);
	assert(newpr->offset == offset);

	if(iter) {
		physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
		assert(physr_get_iter(iter) == newpr);
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return newpr;
}
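/* This is the copy-on-write break: the faulting page's phys_region is
 * unhooked from the shared phys_block (whose refcount drops by one),
 * fresh memory is mapped in at the same offset, and the old contents
 * are copied over. The 'written' flag is carried into the replacement
 * so the sanity checks still know this page has been written before.
 */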
/*===========================================================================*
 *				map_pf					     *
 *===========================================================================*/
int map_pf(vmp, region, offset, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset;
int write;
{
	vir_bytes virpage;
	struct phys_region *ph;
	int r = OK;

	assert(offset >= 0);
	assert(offset < region->length);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));

	virpage = offset - offset % VM_PAGE_SIZE;

	SANITYCHECK(SCL_FUNCTIONS);

	if((ph = physr_search(region->phys, offset, AVL_LESS_EQUAL)) &&
	   (ph->offset <= offset && offset < ph->offset + VM_PAGE_SIZE)) {
		/* Pagefault in existing block. Do copy-on-write. */
		assert(region->flags & VR_WRITABLE);
		assert(ph->ph->refcount > 0);

		if(WRITABLE(region, ph->ph)) {
			r = map_ph_writept(vmp, region, ph);
			if(r != OK)
				printf("map_ph_writept failed\n");
		} else {
			if(ph->ph->refcount > 0
				&& ph->ph->share_flag != PBSH_COW) {
				printf("VM: write RO mapped pages.\n");
				r = EFAULT;
			} else {
				if(!map_clone_ph_block(vmp, region, ph, NULL))
					r = ENOMEM;
			}
		}
	} else {
		/* Pagefault in non-existing block. Map in new block. */
		if(map_new_physblock(vmp, region, virpage,
			VM_PAGE_SIZE, MAP_NONE, PAF_CLEAR, 0) != OK) {
			printf("map_new_physblock failed\n");
			r = ENOMEM;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(r != OK) {
		printf("VM: map_pf: failed (%d)\n", r);
		return r;
	}

	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+virpage,
		VM_PAGE_SIZE, write)) {
		panic("map_pf: pt_checkrange failed: %d", r);
	}

	return r;
}
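/* A fault lands here in one of two states: the page exists (then either
 * the pagetable entry merely needed to be (re)written, or this is a
 * write to a COW page, broken by map_clone_ph_block), or the page does
 * not exist yet and a zeroed page is mapped in on first touch.
 */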
/*===========================================================================*
 *				map_pin_memory				     *
 *===========================================================================*/
int map_pin_memory(struct vmproc *vmp)
{
	struct vir_region *vr;
	int r;
	region_iter iter;

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	/* Scan all memory regions. */
	while((vr = region_get_iter(&iter))) {
		/* Make sure region is mapped to physical memory and writable.*/
		r = map_handle_memory(vmp, vr, 0, vr->length, 1);
		if(r != OK) {
			panic("map_pin_memory: map_handle_memory failed: %d", r);
		}
		region_incr_iter(&iter);
	}

	return OK;
}
/*===========================================================================*
 *				map_handle_memory			     *
 *===========================================================================*/
int map_handle_memory(vmp, region, offset, length, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset, length;
int write;
{
	struct phys_region *physr, *nextphysr;
	int changes = 0;
	physr_iter iter;
	u32_t allocflags = 0;

	if(!(region->flags & VR_UNINITIALIZED)) {
		allocflags = PAF_CLEAR;
	}

#define FREE_RANGE_HERE(er1, er2) {					\
	struct phys_region *r1 = (er1), *r2 = (er2);			\
	vir_bytes start = offset, end = offset + length;		\
	if(r1) {							\
		start = MAX(start, r1->offset + VM_PAGE_SIZE); }	\
	if(r2) {							\
		end = MIN(end, r2->offset); }				\
	if(start < end) {						\
		SANITYCHECK(SCL_DETAIL);				\
		if(map_new_physblock(vmp, region, start,		\
			end-start, MAP_NONE, allocflags, 0) != OK) {	\
			SANITYCHECK(SCL_DETAIL);			\
			return ENOMEM;					\
		}							\
		changes++;						\
	} }

	SANITYCHECK(SCL_FUNCTIONS);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));
	assert(!(offset % VM_PAGE_SIZE));
	assert(!(length % VM_PAGE_SIZE));
	assert(!write || (region->flags & VR_WRITABLE));

	physr_start_iter(region->phys, &iter, offset, AVL_LESS_EQUAL);
	physr = physr_get_iter(&iter);

	if(!physr) {
		physr_start_iter(region->phys, &iter, offset, AVL_GREATER_EQUAL);
		physr = physr_get_iter(&iter);
	}

	FREE_RANGE_HERE(NULL, physr);

	if(physr) {
		physr = reset_physr_iter(region, &iter, physr->offset);
		if(physr->offset + VM_PAGE_SIZE <= offset) {
			physr_incr_iter(&iter);
			physr = physr_get_iter(&iter);

			FREE_RANGE_HERE(NULL, physr);
			if(physr) {
				physr = reset_physr_iter(region, &iter,
					physr->offset);
			}
		}
	}

	while(physr) {
		int r;

		SANITYCHECK(SCL_DETAIL);

		if(write) {
			assert(physr->ph->refcount > 0);
			if(!WRITABLE(region, physr->ph)) {
				if(!(physr = map_clone_ph_block(vmp, region,
					physr, &iter))) {
					printf("VM: map_handle_memory: no copy\n");
					return ENOMEM;
				}
			} else {
				SANITYCHECK(SCL_DETAIL);
				if((r=map_ph_writept(vmp, region, physr)) != OK) {
					printf("VM: map_ph_writept failed\n");
					return r;
				}
				changes++;
				SANITYCHECK(SCL_DETAIL);
			}
		}

		SANITYCHECK(SCL_DETAIL);
		physr_incr_iter(&iter);
		nextphysr = physr_get_iter(&iter);
		FREE_RANGE_HERE(physr, nextphysr);
		SANITYCHECK(SCL_DETAIL);
		if(nextphysr) {
			if(nextphysr->offset >= offset + length)
				break;
			nextphysr = reset_physr_iter(region, &iter,
				nextphysr->offset);
		}
		physr = nextphysr;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(changes < 1) {
		printf("region start at 0x%lx offset 0x%lx len 0x%lx write %d\n",
			region->vaddr, offset, length, write);
		printf("no changes in map_handle_memory\n");
		return EFAULT;
	}

	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset, length, write)) {
		printf("handle mem 0x%lx-0x%lx failed\n",
			region->vaddr+offset, region->vaddr+offset+length);
		map_printregion(vmp, region);
		panic("checkrange failed");
	}

	return OK;
}
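/* map_handle_memory does two jobs in one pass over the range:
 * FREE_RANGE_HERE plugs the holes between existing phys_regions with
 * new (cleared, unless VR_UNINITIALIZED) memory, while the main loop
 * upgrades existing pages for writing -- breaking COW where the block
 * is shared, or just (re)writing the pagetable entry where it is not.
 */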
static int count_phys_regions(struct vir_region *vr)
{
	int n = 0;
	struct phys_region *ph;
	physr_iter iter;
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		n++;
		physr_incr_iter(&iter);
	}
	return n;
}
/*===========================================================================*
 *				map_copy_region				     *
 *===========================================================================*/
static struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region *vr)
{
	/* map_copy_region creates a complete copy of the vir_region
	 * data structure, linking in the same phys_blocks directly,
	 * but all in limbo, i.e., the caller has to link the vir_region
	 * to a process. Therefore it doesn't increase the refcount in
	 * the phys_block; the caller has to do this once it's linked.
	 * The reason for this is to keep the sanity checks working
	 * within this function.
	 */
	struct vir_region *newvr;
	struct phys_region *ph;
	physr_iter iter;
	int cr;

	cr = count_phys_regions(vr);

	if(!(newvr = region_new(vr->parent, vr->vaddr, vr->length, vr->flags)))
		return NULL;

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		struct phys_region *newph = pb_reference(ph->ph, ph->offset, newvr);

		if(!newph) { map_free(newvr); return NULL; }

		USE(newph, newph->written = 0;);
		assert(count_phys_regions(vr) == cr);
		physr_incr_iter(&iter);
	}

	assert(count_phys_regions(vr) == count_phys_regions(newvr));

	return newvr;
}
/*===========================================================================*
 *				copy_abs2region				     *
 *===========================================================================*/
int copy_abs2region(phys_bytes abs, struct vir_region *destregion,
	phys_bytes offset, phys_bytes len)
{
	assert(destregion);
	assert(destregion->phys);
	while(len > 0) {
		phys_bytes sublen, suboffset;
		struct phys_region *ph;
		assert(destregion);
		assert(destregion->phys);
		if(!(ph = physr_search(destregion->phys, offset, AVL_LESS_EQUAL))) {
			printf("VM: copy_abs2region: no phys region found (1).\n");
			return EFAULT;
		}
		assert(ph->offset <= offset);
		if(ph->offset+VM_PAGE_SIZE <= offset) {
			printf("VM: copy_abs2region: no phys region found (2).\n");
			return EFAULT;
		}
		suboffset = offset - ph->offset;
		assert(suboffset < VM_PAGE_SIZE);
		sublen = len;
		if(sublen > VM_PAGE_SIZE - suboffset)
			sublen = VM_PAGE_SIZE - suboffset;
		assert(suboffset + sublen <= VM_PAGE_SIZE);
		if(ph->ph->refcount != 1) {
			printf("VM: copy_abs2region: refcount not 1.\n");
			return EFAULT;
		}

		if(sys_abscopy(abs, ph->ph->phys + suboffset, sublen) != OK) {
			printf("VM: copy_abs2region: abscopy failed.\n");
			return EFAULT;
		}
		abs += sublen;
		offset += sublen;
		len -= sublen;
	}

	return OK;
}
/*=========================================================================*
 *				map_writept				*
 *=========================================================================*/
int map_writept(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *ph;
	int r;
	region_iter v_iter;
	physr_iter ph_iter;

	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		physr_start_iter_least(vr->phys, &ph_iter);

		while((ph = physr_get_iter(&ph_iter))) {
			physr_incr_iter(&ph_iter);

			/* If this phys block is shared as SMAP, then do
			 * not update the page table. */
			if(ph->ph->refcount > 1
				&& ph->ph->share_flag == PBSH_SMAP) {
				continue;
			}

			if((r=map_ph_writept(vmp, vr, ph)) != OK) {
				printf("VM: map_writept: failed\n");
				return r;
			}
		}
		region_incr_iter(&v_iter);
	}

	return OK;
}
/*========================================================================*
 *				map_proc_copy				  *
 *========================================================================*/
int map_proc_copy(dst, src)
struct vmproc *dst;
struct vmproc *src;
{
/* Copy all the memory regions from the src process to the dst process. */
	region_init(&dst->vm_regions_avl);

	return map_proc_copy_from(dst, src, NULL);
}
/*========================================================================*
 *				map_proc_copy_from			  *
 *========================================================================*/
int map_proc_copy_from(dst, src, start_src_vr)
struct vmproc *dst;
struct vmproc *src;
struct vir_region *start_src_vr;
{
	struct vir_region *vr;
	region_iter v_iter;

	if(!start_src_vr)
		start_src_vr = region_search_least(&src->vm_regions_avl);

	assert(start_src_vr);
	assert(start_src_vr->parent == src);
	region_start_iter(&src->vm_regions_avl, &v_iter,
		start_src_vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter) == start_src_vr);

	/* Copy source regions after the destination's last region (if any). */

	SANITYCHECK(SCL_FUNCTIONS);

	while((vr = region_get_iter(&v_iter))) {
		physr_iter iter_orig, iter_new;
		struct vir_region *newvr;
		struct phys_region *orig_ph, *new_ph;
		if(!(newvr = map_copy_region(dst, vr))) {
			map_free_proc(dst);
			return ENOMEM;
		}
		USE(newvr, newvr->parent = dst;);
		region_insert(&dst->vm_regions_avl, newvr);
		physr_start_iter_least(vr->phys, &iter_orig);
		physr_start_iter_least(newvr->phys, &iter_new);
		while((orig_ph = physr_get_iter(&iter_orig))) {
			struct phys_block *pb;
			new_ph = physr_get_iter(&iter_new);
			/* Check two physregions both are nonnull,
			 * are different, and match physblocks.
			 */
			assert(orig_ph);
			assert(new_ph);
			assert(orig_ph != new_ph);
			pb = orig_ph->ph;
			assert(orig_ph->ph == new_ph->ph);

			/* If the phys block has been shared as SMAP,
			 * do the regular copy. */
			if(pb->refcount > 2 && pb->share_flag == PBSH_SMAP) {
				map_clone_ph_block(dst, newvr,new_ph,
					&iter_new);
			} else {
				USE(pb, pb->share_flag = PBSH_COW;);
			}

			/* Get next new physregion */
			physr_incr_iter(&iter_orig);
			physr_incr_iter(&iter_new);
		}
		assert(!physr_get_iter(&iter_new));
		region_incr_iter(&v_iter);
	}

	map_writept(src);
	map_writept(dst);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
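/* This is the fork() path: the child gets its own vir_regions and
 * phys_regions, but they point at the parent's phys_blocks, which are
 * flagged PBSH_COW. The map_writept() calls then rewrite both address
 * spaces so every shared block is mapped read-only (see WRITABLE), and
 * the first write by either side faults and clones the page.
 */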
int map_region_extend_upto_v(struct vmproc *vmp, vir_bytes v)
{
	vir_bytes offset = v, end;
	struct vir_region *vr, *nextvr;
	int r;

	if(!(vr = region_search(&vmp->vm_regions_avl, offset, AVL_LESS))) {
		printf("VM: nothing to extend\n");
		return ENOMEM;
	}

	if(!(vr->flags & VR_ANON)) {
		printf("VM: memory range to extend not anonymous\n");
		return ENOMEM;
	}

	assert(vr->vaddr <= offset);
	if((nextvr = getnextvr(vr))) {
		assert(offset <= nextvr->vaddr);
	}

	end = vr->vaddr + vr->length;

	offset = roundup(offset, VM_PAGE_SIZE);

	if(end < offset)
		r = map_region_extend(vmp, vr, offset - end);
	else
		r = OK;

	return r;
}
/*========================================================================*
 *				map_region_extend	     		  *
 *========================================================================*/
int map_region_extend(struct vmproc *vmp, struct vir_region *vr,
	vir_bytes delta)
{
	vir_bytes end;
	struct vir_region *nextvr;

	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));
	if(vr->flags & VR_CONTIG) {
		printf("VM: can't grow contig region\n");
		return EFAULT;
	}

	if(!delta) return OK;
	end = vr->vaddr + vr->length;
	assert(end >= vr->vaddr);

	if(end + delta <= end) {
		printf("VM: strange delta 0x%lx\n", delta);
		return ENOMEM;
	}

	nextvr = getnextvr(vr);

	if(!nextvr || end + delta <= nextvr->vaddr) {
		USE(vr, vr->length += delta;);
		return OK;
	}

	return ENOMEM;
}
/*========================================================================*
 *				map_region_shrink	     		  *
 *========================================================================*/
int map_region_shrink(struct vir_region *vr, vir_bytes delta)
{
	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));

#if 0
	printf("VM: ignoring region shrink\n");
#endif

	return OK;
}
struct vir_region *map_region_lookup_tag(vmp, tag)
struct vmproc *vmp;
u32_t tag;
{
	struct vir_region *vr;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		if(vr->tag == tag)
			return vr;
		region_incr_iter(&v_iter);
	}

	return NULL;
}
void map_region_set_tag(struct vir_region *vr, u32_t tag)
{
	USE(vr, vr->tag = tag;);
}

u32_t map_region_get_tag(struct vir_region *vr)
{
	return vr->tag;
}
/*========================================================================*
 *				map_unmap_region	     	  	  *
 *========================================================================*/
int map_unmap_region(struct vmproc *vmp, struct vir_region *r,
	vir_bytes offset, vir_bytes len)
{
/* Shrink the region by 'len' bytes, from the start. Unreference
 * memory it used to reference if any.
 */
	vir_bytes regionstart;

	SANITYCHECK(SCL_FUNCTIONS);

	if(offset+len > r->length || (len % VM_PAGE_SIZE)) {
		printf("VM: bogus length 0x%lx\n", len);
		return EINVAL;
	}

	if(!(r->flags & (VR_ANON|VR_DIRECT))) {
		printf("VM: only unmap anonymous or direct memory\n");
		return EINVAL;
	}

	regionstart = r->vaddr + offset;

	/* unreference its memory */
	map_subfree(r, offset, len);

	/* if unmap was at start/end of this region, it actually shrinks */
	if(offset == 0) {
		struct phys_region *pr;
		physr_iter iter;

		region_remove(&vmp->vm_regions_avl, r->vaddr);

		USE(r,
		r->vaddr += len;
		r->length -= len;);

		region_insert(&vmp->vm_regions_avl, r);

		/* vaddr has increased; to make all the phys_regions
		 * point to the same addresses, make them shrink by the
		 * same amount.
		 */
		physr_init_iter(&iter);
		physr_start_iter(r->phys, &iter, offset, AVL_GREATER_EQUAL);

		while((pr = physr_get_iter(&iter))) {
			assert(pr->offset >= offset);
			USE(pr, pr->offset -= len;);
			physr_incr_iter(&iter);
		}
	} else if(offset + len == r->length) {
		assert(len <= r->length);
		USE(r, r->length -= len;);
	}

	if(r->length == 0) {
		/* Whole region disappears. Unlink and free it. */
		region_remove(&vmp->vm_regions_avl, r->vaddr);
		map_free(r);
	}

	SANITYCHECK(SCL_DETAIL);

	if(pt_writemap(vmp, &vmp->vm_pt, regionstart,
	  MAP_NONE, len, 0, WMF_OVERWRITE) != OK) {
		printf("VM: map_unmap_region: pt_writemap failed\n");
		return ENOMEM;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
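/* Unmapping from the start is the interesting case: the region keeps
 * its identity but its vaddr moves up, so it must be removed from and
 * re-inserted into the AVL tree (which is keyed on vaddr), and every
 * remaining phys_region offset shifts down by 'len' so that
 * vaddr + offset still names the same virtual page as before.
 */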
/*========================================================================*
 *				map_remap				  *
 *========================================================================*/
int map_remap(struct vmproc *dvmp, vir_bytes da, size_t size,
	struct vir_region *region, vir_bytes *r, int readonly)
{
	struct vir_region *vr;
	struct phys_region *ph;
	vir_bytes startv, dst_addr;
	physr_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	assert(region->flags & VR_SHARED);

	/* da is handled differently */
	dst_addr = da;

	/* round up to page size */
	assert(!(size % VM_PAGE_SIZE));
	startv = region_find_slot(dvmp, dst_addr, VM_DATATOP, size);
	if (startv == SLOT_FAIL) {
		return ENOMEM;
	}
	/* when the user specifies the address, we cannot change it */
	if (da && (startv != dst_addr))
		return EINVAL;

	vr = map_copy_region(dvmp, region);
	if(!vr)
		return ENOMEM;

	USE(vr,
	vr->vaddr = startv;
	vr->length = size;
	vr->flags = region->flags;
	vr->tag = VRT_NONE;
	vr->parent = dvmp;
	if(readonly) {
		vr->flags &= ~VR_WRITABLE;
	});
	assert(vr->flags & VR_SHARED);

	region_insert(&dvmp->vm_regions_avl, vr);

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		if(map_ph_writept(dvmp, vr, ph) != OK) {
			panic("map_remap: map_ph_writept failed");
		}
		physr_incr_iter(&iter);
	}

	*r = startv;

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*========================================================================*
 *				map_get_phys				  *
 *========================================================================*/
int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
{
	struct vir_region *vr;
	struct phys_region *ph;
	physr_iter iter;

	if (!(vr = map_lookup(vmp, addr, NULL)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!(vr->flags & VR_SHARED))
		return EINVAL;

	physr_start_iter_least(vr->phys, &iter);
	ph = physr_get_iter(&iter);

	assert(ph);
	assert(ph->ph);
	if (r)
		*r = ph->ph->phys;

	return OK;
}
/*========================================================================*
 *				map_get_ref				  *
 *========================================================================*/
int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
{
	struct vir_region *vr;
	struct phys_region *ph;
	physr_iter iter;

	if (!(vr = map_lookup(vmp, addr, NULL)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!(vr->flags & VR_SHARED))
		return EINVAL;

	physr_start_iter_least(vr->phys, &iter);
	ph = physr_get_iter(&iter);

	assert(ph);
	assert(ph->ph);
	if (cnt)
		*cnt = ph->ph->refcount;

	return OK;
}
/*========================================================================*
 *				get_stats_info				  *
 *========================================================================*/
void get_stats_info(struct vm_stats_info *vsi)
{
	yielded_t *yb;

	vsi->vsi_cached = 0L;

	for(yb = lru_youngest; yb; yb = yb->older)
		vsi->vsi_cached++;
}
/*========================================================================*
 *				get_usage_info				  *
 *========================================================================*/
void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
{
	struct vir_region *vr;
	physr_iter iter;
	struct phys_region *ph;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	memset(vui, 0, sizeof(*vui));

	while((vr = region_get_iter(&v_iter))) {
		physr_start_iter_least(vr->phys, &iter);
		while((ph = physr_get_iter(&iter))) {
			/* All present pages are counted towards the total. */
			vui->vui_total += VM_PAGE_SIZE;

			if (ph->ph->refcount > 1) {
				/* Any page with a refcount > 1 is common. */
				vui->vui_common += VM_PAGE_SIZE;

				/* Any common, non-COW page is shared. */
				if (vr->flags & VR_SHARED ||
					ph->ph->share_flag == PBSH_SMAP)
					vui->vui_shared += VM_PAGE_SIZE;
			}
			physr_incr_iter(&iter);
		}
		region_incr_iter(&v_iter);
	}
}
/*===========================================================================*
 *				get_region_info				     *
 *===========================================================================*/
int get_region_info(struct vmproc *vmp, struct vm_region_info *vri,
	int max, vir_bytes *nextp)
{
	struct vir_region *vr;
	vir_bytes next;
	int count;
	region_iter v_iter;

	next = *nextp;

	if (!max) return 0;

	region_start_iter(&vmp->vm_regions_avl, &v_iter, next, AVL_GREATER_EQUAL);
	if(!(vr = region_get_iter(&v_iter))) return 0;

	for(count = 0; (vr = region_get_iter(&v_iter)) && count < max; count++, vri++) {
		struct phys_region *ph1, *ph2;

		/* Report part of the region that's actually in use. */

		/* Get first and last phys_regions, if any */
		ph1 = physr_search_least(vr->phys);
		ph2 = physr_search_greatest(vr->phys);
		if(!ph1 || !ph2) { assert(!ph1 && !ph2); continue; }

		/* Report start+length of region starting from lowest use. */
		vri->vri_addr = vr->vaddr + ph1->offset;
		vri->vri_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
		vri->vri_length = ph2->offset + VM_PAGE_SIZE - ph1->offset;

		/* "AND" the provided protection with per-page protection. */
		if (!(vr->flags & VR_WRITABLE))
			vri->vri_prot &= ~PROT_WRITE;

		vri->vri_flags = (vr->flags & VR_SHARED) ? MAP_IPC_SHARED : 0;

		next = vr->vaddr + vr->length;
		region_incr_iter(&v_iter);
	}

	*nextp = next;
	return count;
}
/*========================================================================*
 *				regionprintstats			  *
 *========================================================================*/
void printregionstats(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *pr;
	physr_iter iter;
	vir_bytes used = 0, weighted = 0;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		region_incr_iter(&v_iter);
		if(vr->flags & VR_DIRECT)
			continue;
		physr_start_iter_least(vr->phys, &iter);
		while((pr = physr_get_iter(&iter))) {
			physr_incr_iter(&iter);
			used += VM_PAGE_SIZE;
			weighted += VM_PAGE_SIZE / pr->ph->refcount;
		}
	}

	printf("%6lukB %6lukB\n", used/1024, weighted/1024);
}
/*===========================================================================*
 *				do_map_memory				     *
 *===========================================================================*/
static int do_map_memory(struct vmproc *vms, struct vmproc *vmd,
	struct vir_region *vrs, struct vir_region *vrd,
	vir_bytes offset_s, vir_bytes offset_d,
	vir_bytes length, int flag)
{
	struct phys_region *prs;
	struct phys_region *newphysr;
	struct phys_block *pb;
	physr_iter iter;
	u32_t pt_flag = PTF_PRESENT | PTF_USER;
	vir_bytes end;

	/* Search for the first phys region in the source process. */
	physr_start_iter(vrs->phys, &iter, offset_s, AVL_EQUAL);
	prs = physr_get_iter(&iter);
	if(!prs)
		panic("do_map_memory: no aligned phys region: %d", 0);

	/* flag: 0 -> read-only
	 *       1 -> writable
	 *      -1 -> share as COW, so read-only
	 */
	if(flag > 0)
		pt_flag |= PTF_WRITE;
	else
		pt_flag |= PTF_READ;

	/* Map phys blocks in the source process to the destination process. */
	end = offset_d + length;
	while((prs = physr_get_iter(&iter)) && offset_d < end) {
		/* If a SMAP share was requested but the phys block has already
		 * been shared as COW, copy the block for the source phys region
		 * first.
		 */
		pb = prs->ph;
		if(flag >= 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_COW) {
			if(!(prs = map_clone_ph_block(vms, vrs, prs, &iter)))
				return ENOMEM;
			pb = prs->ph;
		}

		/* Allocate a new phys region. */
		if(!(newphysr = pb_reference(pb, offset_d, vrd)))
			return ENOMEM;

		/* If a COW share was requested but the phys block has already
		 * been shared as SMAP, give up on COW and copy the block for
		 * the destination phys region now.
		 */
		if(flag < 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_SMAP) {
			if(!(newphysr = map_clone_ph_block(vmd, vrd,
				newphysr, NULL))) {
				return ENOMEM;
			}
		}
		else {
			/* See if this is a COW share or SMAP share. */
			if(flag < 0) {			/* COW share */
				pb->share_flag = PBSH_COW;
				/* Update the page table for the src process. */
				pt_writemap(vms, &vms->vm_pt, offset_s + vrs->vaddr,
					pb->phys, VM_PAGE_SIZE,
					pt_flag, WMF_OVERWRITE);
			}
			else {			/* SMAP share */
				pb->share_flag = PBSH_SMAP;
			}
			/* Update the page table for the destination process. */
			pt_writemap(vmd, &vmd->vm_pt, offset_d + vrd->vaddr,
				pb->phys, VM_PAGE_SIZE, pt_flag, WMF_OVERWRITE);
		}

		physr_incr_iter(&iter);
		offset_d += VM_PAGE_SIZE;
		offset_s += VM_PAGE_SIZE;
	}

	return OK;
}
/*===========================================================================*
 *				unmap_memory				     *
 *===========================================================================*/
int unmap_memory(endpoint_t sour, endpoint_t dest,
	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
{
	struct vmproc *vmd;
	struct vir_region *vrd;
	struct phys_region *pr;
	struct phys_block *pb;
	physr_iter iter;
	vir_bytes off, end;
	int p;

	/* Use information on the destination process to unmap. */
	if(vm_isokendpt(dest, &p) != OK)
		panic("unmap_memory: bad endpoint: %d", dest);
	vmd = &vmproc[p];

	vrd = map_lookup(vmd, virt_d, NULL);
	assert(vrd);

	/* Search for the first phys region in the destination process. */
	off = virt_d - vrd->vaddr;
	physr_start_iter(vrd->phys, &iter, off, AVL_EQUAL);
	pr = physr_get_iter(&iter);
	if(!pr)
		panic("unmap_memory: no aligned phys region: %d", 0);

	/* Copy the phys block now rather than doing COW. */
	end = off + length;
	while((pr = physr_get_iter(&iter)) && off < end) {
		pb = pr->ph;
		assert(pb->refcount > 1);
		assert(pb->share_flag == PBSH_SMAP);

		if(!(pr = map_clone_ph_block(vmd, vrd, pr, &iter)))
			return ENOMEM;

		physr_incr_iter(&iter);
		off += VM_PAGE_SIZE;
	}

	return OK;
}
/*===========================================================================*
 *				rm_phys_regions				     *
 *===========================================================================*/
static void rm_phys_regions(struct vir_region *region,
	vir_bytes begin, vir_bytes length)
{
/* Remove all phys regions between @begin and @begin+length.
 *
 * Don't update the page table, because we will update it at map_memory()
 * later.
 */
	struct phys_region *pr;
	physr_iter iter;

	physr_start_iter(region->phys, &iter, begin, AVL_GREATER_EQUAL);
	while((pr = physr_get_iter(&iter)) && pr->offset < begin + length) {
		pb_unreferenced(region, pr, 1);
		physr_start_iter(region->phys, &iter, begin,
			AVL_GREATER_EQUAL);
		SLABFREE(pr);
	}
}
/*===========================================================================*
 *				map_memory				     *
 *===========================================================================*/
int map_memory(endpoint_t sour, endpoint_t dest,
	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
{
/* This is the entry point. This function will be called by handle_memory() when
 * VM receives a map-memory request.
 */
	struct vmproc *vms, *vmd;
	struct vir_region *vrs, *vrd;
	vir_bytes offset_s, offset_d;
	int p;
	int r;

	if(vm_isokendpt(sour, &p) != OK)
		panic("map_memory: bad endpoint: %d", sour);
	vms = &vmproc[p];
	if(vm_isokendpt(dest, &p) != OK)
		panic("map_memory: bad endpoint: %d", dest);
	vmd = &vmproc[p];

	vrs = map_lookup(vms, virt_s, NULL);
	assert(vrs);
	vrd = map_lookup(vmd, virt_d, NULL);
	assert(vrd);

	/* Linear address -> offset from start of vir region. */
	offset_s = virt_s - vrs->vaddr;
	offset_d = virt_d - vrd->vaddr;

	/* Make sure that the range in the source process has been mapped
	 * to physical memory.
	 */
	map_handle_memory(vms, vrs, offset_s, length, 0);

	/* Prepare work. */
	rm_phys_regions(vrd, offset_d, length);

	/* Map memory. */
	r = do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, flag);

	return r;
}
/*===========================================================================*
 *				get_clean_phys_region			     *
 *===========================================================================*/
static struct phys_region *
get_clean_phys_region(struct vmproc *vmp, vir_bytes vaddr, struct vir_region **ret_region)
{
	struct vir_region *region;
	vir_bytes mapaddr;
	struct phys_region *ph;

	mapaddr = vaddr;

	if(!(region = map_lookup(vmp, mapaddr, &ph)) || !ph) {
		printf("VM: get_clean_phys_region: 0x%lx not found\n", vaddr);
		return NULL;
	}

	assert(mapaddr >= region->vaddr);
	assert(mapaddr < region->vaddr + region->length);

	/* If it's mapped more than once, make a copy. */
	assert(ph->ph->refcount > 0);
	if(ph->ph->refcount > 1) {
		if(!(ph = map_clone_ph_block(vmp, region,
			ph, NULL))) {
			printf("VM: get_clean_phys_region: ph copy failed\n");
			return NULL;
		}
	}

	assert(ph->ph->refcount == 1);

	*ret_region = region;

	return ph;
}
static int getblock(struct vmproc *vmp, u64_t id, vir_bytes vaddr, int pages)
{
	yielded_t *yb;
	struct phys_region *ph;
	struct vir_region *region;
	yielded_avl *avl;
	block_id_t blockid;
	phys_bytes phaddr;
	int p;

	/* Try to get the yielded block */
	blockid.owner = vmp->vm_endpoint;
	blockid.id = id;
	avl = get_yielded_avl(blockid);
	if(!(yb = yielded_search(avl, blockid, AVL_EQUAL))) {
		return ESRCH;
	}

	if(yb->pages != pages) {
		printf("VM: getblock: length mismatch (%d != %d)\n",
			pages, yb->pages);
		return EFAULT;
	}

	phaddr = yb->physaddr;

	for(p = 0; p < pages; p++) {
		/* Get the intended phys region, make sure refcount is 1. */
		if(!(ph = get_clean_phys_region(vmp, vaddr, &region))) {
			printf("VM: getblock: not found for %d\n", vmp->vm_endpoint);
			return EINVAL;
		}

		assert(ph->ph->refcount == 1);

		/* Free the block that is currently there. */
		free_mem(ABS2CLICK(ph->ph->phys), 1);

		/* Set the phys block to new addr and update pagetable. */
		USE(ph->ph, ph->ph->phys = phaddr;);
		if(map_ph_writept(vmp, region, ph) != OK) {
			/* Presumably it was mapped, so there is no reason
			 * updating should fail.
			 */
			panic("do_get_block: couldn't write pt");
		}

		vaddr += VM_PAGE_SIZE;
		phaddr += VM_PAGE_SIZE;
	}

	/* Forget about the yielded block and free the struct. */
	freeyieldednode(yb, 0);

	return OK;
}
static int yieldblock(struct vmproc *vmp, u64_t id,
	vir_bytes vaddr, yielded_t **retyb, int pages)
{
	yielded_t *newyb;
	vir_bytes mem_clicks, v, p, new_phaddr;
	struct vir_region *region;
	struct phys_region *ph = NULL, *prev_ph = NULL, *first_ph = NULL;
	yielded_avl *avl;
	block_id_t blockid;

	/* Makes no sense if yielded block ID already exists, and
	 * is likely a serious bug in the caller.
	 */
	blockid.id = id;
	blockid.owner = vmp->vm_endpoint;
	avl = get_yielded_avl(blockid);
	if(yielded_search(avl, blockid, AVL_EQUAL)) {
		return EINVAL;
	}

	if((vaddr % VM_PAGE_SIZE) || pages < 1) return EFAULT;

	v = vaddr;
	for(p = 0; p < pages; p++) {
		if(!(region = map_lookup(vmp, v, &ph)) || !ph) {
			printf("VM: do_yield_block: not found for %d\n",
				vmp->vm_endpoint);
			return EINVAL;
		}
		if(!(region->flags & VR_ANON)) {
			printf("VM: yieldblock: non-anon 0x%lx\n", v);
			return EFAULT;
		}
		if(ph->ph->refcount != 1) {
			printf("VM: do_yield_block: mapped not once for %d\n",
				vmp->vm_endpoint);
			return EFAULT;
		}
		if(prev_ph) {
			if(ph->ph->phys != prev_ph->ph->phys + VM_PAGE_SIZE) {
				printf("VM: physically discontiguous yield\n");
				return EINVAL;
			}
		}
		prev_ph = ph;
		if(!first_ph) first_ph = ph;
		v += VM_PAGE_SIZE;
	}

	/* Make a new block to record the yielding in. */
	if(!SLABALLOC(newyb)) {
		return ENOMEM;
	}

	assert(!(ph->ph->phys % VM_PAGE_SIZE));

	if((mem_clicks = alloc_mem(pages, PAF_CLEAR)) == NO_MEM) {
		SLABFREE(newyb);
		return ENOMEM;
	}

	/* Update yielded block info. */
	USE(newyb,
		newyb->id = blockid;
		newyb->physaddr = first_ph->ph->phys;
		newyb->pages = pages;
		newyb->younger = NULL;);

	new_phaddr = CLICK2ABS(mem_clicks);

	/* Set new phys block to new addr and update pagetable. */
	v = vaddr;
	for(p = 0; p < pages; p++) {
		region = map_lookup(vmp, v, &ph);
		assert(region && ph);
		assert(ph->ph->refcount == 1);
		USE(ph->ph,
			ph->ph->phys = new_phaddr;);
		if(map_ph_writept(vmp, region, ph) != OK) {
			/* Presumably it was mapped, so there is no reason
			 * updating should fail.
			 */
			panic("yield_block: couldn't write pt");
		}
		v += VM_PAGE_SIZE;
		new_phaddr += VM_PAGE_SIZE;
	}

	/* Remember yielded block. */

	yielded_insert(avl, newyb);
	vmp->vm_yielded++;

	/* Add to LRU list too. It's the youngest block. */
	LRUCHECK;

	if(lru_youngest) {
		USE(lru_youngest,
			lru_youngest->younger = newyb;);
	} else {
		lru_oldest = newyb;
	}

	USE(newyb,
		newyb->older = lru_youngest;);

	lru_youngest = newyb;

	LRUCHECK;

	if(retyb)
		*retyb = newyb;

	return OK;
}
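/* Yielding swaps the caller's pages for fresh zeroed memory while VM
 * keeps the old (physically contiguous) block in its cache; getblock
 * later swaps the cached block back in, and free_yielded() may reclaim
 * it from the oldest end of the LRU at any time in between. A process
 * must therefore treat the contents of a yielded block as lost unless
 * getblock succeeds.
 */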
/*===========================================================================*
 *				do_forgetblocks				     *
 *===========================================================================*/
int do_forgetblocks(message *m)
{
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yield_block: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	free_yielded_proc(vmp);

	return OK;
}
/*===========================================================================*
 *				do_forgetblock				     *
 *===========================================================================*/
int do_forgetblock(message *m)
{
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;
	yielded_t *yb;
	u64_t id;
	block_id_t blockid;
	yielded_avl *avl;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yield_block: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	id = make64(m->VMFB_IDLO, m->VMFB_IDHI);

	blockid.id = id;
	blockid.owner = vmp->vm_endpoint;
	avl = get_yielded_avl(blockid);
	if((yb = yielded_search(avl, blockid, AVL_EQUAL))) {
		freeyieldednode(yb, 1);
	}

	return OK;
}
/*===========================================================================*
 *				do_yieldblockgetblock			     *
 *===========================================================================*/
int do_yieldblockgetblock(message *m)
{
	u64_t yieldid, getid;
	int n, pages;
	endpoint_t caller = m->m_source;
	struct vmproc *vmp;
	yielded_t *yb = NULL;
	int r = OK;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yieldblockgetblock: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	pages = m->VMYBGB_LEN / VM_PAGE_SIZE;

	if((m->VMYBGB_LEN % VM_PAGE_SIZE) || pages < 1) {
		printf("vm: non-page-aligned or short block length\n");
		return EFAULT;
	}

	yieldid = make64(m->VMYBGB_YIELDIDLO, m->VMYBGB_YIELDIDHI);
	getid = make64(m->VMYBGB_GETIDLO, m->VMYBGB_GETIDHI);

	if(cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to yield. */
		yieldblock(vmp, yieldid, (vir_bytes) m->VMYBGB_VADDR, &yb,
			pages);
	}

	if(cmp64(getid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to get. */
		r = getblock(vmp, getid, (vir_bytes) m->VMYBGB_VADDR, pages);
	}

	return r;
}
void map_setparent(struct vmproc *vmp)
{
	region_iter iter;
	struct vir_region *vr;
	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		USE(vr, vr->parent = vmp;);
		region_incr_iter(&iter);
	}
}