#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>
#include <minix/hash.h>
#include <machine/multiboot.h>

#include <sys/param.h>

#include "sanitycheck.h"
static yielded_t *lru_youngest = NULL, *lru_oldest = NULL;
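/* Yielded blocks are kept on a doubly-linked LRU list; lru_youngest and
 * lru_oldest are its two ends. free_yielded() below reclaims from the
 * oldest end first, so the longest-unused donated blocks go first.
 */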
/* Should a physblock be mapped writable? */
#define WRITABLE(r, pb) \
	(((r)->flags & VR_WRITABLE) && \
		(((r)->flags & (VR_DIRECT | VR_SHARED)) || \
		 (pb)->refcount == 1))
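/* Writability policy sketch: a physblock is mapped writable only when its
 * region is writable and either the mapping is direct/shared or this is the
 * block's sole reference. A shared block is therefore mapped read-only, so
 * a write faults into map_pf() below, which clones the block first
 * (copy-on-write).
 */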
static int map_new_physblock(struct vmproc *vmp, struct vir_region
	*region, vir_bytes offset, vir_bytes length, phys_bytes what,
	u32_t allocflags, int written);

static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr);

static phys_bytes freeyieldednode(yielded_t *node, int freemem);

static struct vir_region *map_copy_region(struct vmproc *vmp, struct
	vir_region *vr);

static struct phys_region *map_clone_ph_block(struct vmproc *vmp,
	struct vir_region *region, struct phys_region *ph, physr_iter *iter);
static void lrucheck(void);
/* hash table of yielded blocks */
#define YIELD_HASHSIZE 65536
static yielded_avl vm_yielded_blocks[YIELD_HASHSIZE];

static int avl_inited = 0;
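/* Yielded blocks are looked up by (owner endpoint, block id). To keep the
 * AVL trees small, that id pair is hashed into one of YIELD_HASHSIZE
 * buckets, each with its own AVL tree; see get_yielded_avl() below.
 */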
void map_region_init(void)
{
	int h;
	assert(!avl_inited);
	for(h = 0; h < YIELD_HASHSIZE; h++)
		yielded_init(&vm_yielded_blocks[h]);
	avl_inited = 1;
}
static yielded_avl *get_yielded_avl(block_id_t id)
{
	u32_t h;

	hash_i_64(id.owner, id.id, h);
	h = h % YIELD_HASHSIZE;

	assert(h < YIELD_HASHSIZE);

	return &vm_yielded_blocks[h];
}
static char *map_name(struct vir_region *vr)
{
	static char name[100];
	char *typename, *tag;
	int type = vr->flags & (VR_ANON|VR_DIRECT);
	switch(type) {
		case VR_ANON:
			typename = "anonymous";
			break;
		case VR_DIRECT:
			typename = "direct";
			break;
		default:
			panic("unknown mapping type: %d", type);
	}

	switch(vr->tag) {
		default:
			tag = "unknown tag value";
			break;
	}

	sprintf(name, "%s, %s", typename, tag);

	return name;
}
void map_printregion(struct vmproc *vmp, struct vir_region *vr)
{
	physr_iter iter;
	struct phys_region *ph;
	printf("map_printmap: map_name: %s\n", map_name(vr));
	printf("\t%lx (len 0x%lx, %lukB), %p\n",
		vr->vaddr, vr->length, vr->length/1024, map_name(vr));
	printf("\t\tphysblocks:\n");
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		printf("\t\t@ %lx (refs %d): phys 0x%lx\n",
			(vr->vaddr + ph->offset),
			ph->ph->refcount, ph->ph->phys);
		physr_incr_iter(&iter);
	}
}
/*===========================================================================*
 *				map_printmap				     *
 *===========================================================================*/
void map_printmap(vmp)
struct vmproc *vmp;
{
	struct vir_region *vr;
	region_iter iter;

	printf("memory regions in process %d:\n", vmp->vm_endpoint);

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		map_printregion(vmp, vr);
		region_incr_iter(&iter);
	}
}
static struct vir_region *getnextvr(struct vir_region *vr)
{
	struct vir_region *nextvr;
	region_iter v_iter;

	region_start_iter(&vr->parent->vm_regions_avl, &v_iter, vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter));
	assert(region_get_iter(&v_iter) == vr);
	region_incr_iter(&v_iter);
	nextvr = region_get_iter(&v_iter);
	if(!nextvr) return NULL;

	assert(vr->parent == nextvr->parent);
	assert(vr->vaddr < nextvr->vaddr);
	assert(vr->vaddr + vr->length <= nextvr->vaddr);

	return nextvr;
}
/*===========================================================================*
 *				map_sanitycheck_pt			     *
 *===========================================================================*/
static int map_sanitycheck_pt(struct vmproc *vmp,
	struct vir_region *vr, struct phys_region *pr)
{
	struct phys_block *pb = pr->ph;
	int rw;
	int r;

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	r = pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
		pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw, WMF_VERIFY);

	if(r != OK) {
		printf("proc %d phys_region 0x%lx sanity check failed\n",
			vmp->vm_endpoint, pr->offset);
		map_printregion(vmp, vr);
	}

	return r;
}
/*===========================================================================*
 *				map_sanitycheck				     *
 *===========================================================================*/
void map_sanitycheck(char *file, int line)
{
	struct vmproc *vmp;

/* Macro for looping over all physical blocks of all regions of
 * a process.
 */
#define ALLREGIONS(regioncode, physcode) \
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) { \
		region_iter v_iter; \
		struct vir_region *vr; \
		if(!(vmp->vm_flags & VMF_INUSE)) \
			continue; \
		region_start_iter_least(&vmp->vm_regions_avl, &v_iter); \
		while((vr = region_get_iter(&v_iter))) { \
			physr_iter iter; \
			struct phys_region *pr; \
			regioncode; \
			physr_start_iter_least(vr->phys, &iter); \
			while((pr = physr_get_iter(&iter))) { \
				physcode; \
				physr_incr_iter(&iter); \
			} \
			region_incr_iter(&v_iter); \
		} \
	}
#define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
	/* Basic pointers check. */
	ALLREGIONS(MYSLABSANE(vr),MYSLABSANE(pr); MYSLABSANE(pr->ph);MYSLABSANE(pr->parent));
	ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr->parent == vr););
	/* Do counting for consistency check. */
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
		if(pr->ph->seencount == 1) {
			if(!(pr->parent->flags & VR_DIRECT)) {
				MYASSERT(usedpages_add(pr->ph->phys,
					VM_PAGE_SIZE) == OK);
			}
		}
	);
	/* Do consistency check. */
	ALLREGIONS({ struct vir_region *nextvr = getnextvr(vr);
		if(nextvr) {
			MYASSERT(vr->vaddr < nextvr->vaddr);
			MYASSERT(vr->vaddr + vr->length <= nextvr->vaddr);
		} }
		MYASSERT(!(vr->vaddr % VM_PAGE_SIZE));,
		if(pr->ph->refcount != pr->ph->seencount) {
			map_printmap(vmp);
			printf("ph in vr %p: 0x%lx refcount %u "
				"but seencount %u\n",
				vr, pr->offset,
				pr->ph->refcount, pr->ph->seencount);
		}
		{
			int n_others = 0;
			struct phys_region *others;
			if(pr->ph->refcount > 0) {
				MYASSERT(pr->ph->firstregion);
				if(pr->ph->refcount == 1) {
					MYASSERT(pr->ph->firstregion == pr);
				}
			} else {
				MYASSERT(!pr->ph->firstregion);
			}
			for(others = pr->ph->firstregion; others;
				others = others->next_ph_list) {
				MYSLABSANE(others);
				MYASSERT(others->ph == pr->ph);
				n_others++;
			}
			MYASSERT(pr->ph->refcount == n_others);
		}
		MYASSERT(pr->ph->refcount == pr->ph->seencount);
		MYASSERT(!(pr->offset % VM_PAGE_SIZE)););
	ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp, vr, pr) == OK));
}
#define LRUCHECK lrucheck()

static void lrucheck(void)
{
	yielded_t *list;

	/* list is empty and ok if both ends point to null. */
	if(!lru_youngest && !lru_oldest)
		return;

	/* if not, both should point to something. */
	SLABSANE(lru_youngest);
	SLABSANE(lru_oldest);

	assert(!lru_youngest->younger);
	assert(!lru_oldest->older);

	for(list = lru_youngest; list; list = list->older) {
		SLABSANE(list);
		if(list->younger) {
			SLABSANE(list->younger);
			assert(list->younger->older == list);
		} else	assert(list == lru_youngest);
		if(list->older) {
			SLABSANE(list->older);
			assert(list->older->younger == list);
		} else	assert(list == lru_oldest);
	}
}
void blockstats(void)
{
	yielded_t *list;
	int blocks = 0;
	phys_bytes mem = 0;
	clock_t ticks;
	int s;

	s = getuptime(&ticks);

	for(list = lru_youngest; list; list = list->older) {
		blocks++;
		mem += VM_PAGE_SIZE * list->pages;
	}

	printf("%d blocks, %lukB; ", blocks, mem/1024);
}
/*=========================================================================*
 *				map_ph_writept				     *
 *=========================================================================*/
static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr)
{
	int rw;
	struct phys_block *pb = pr->ph;

	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(pr->offset % VM_PAGE_SIZE));
	assert(pb->refcount > 0);

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	if(pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
			pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw,
			WMF_OVERWRITE) != OK) {
		printf("VM: map_writept: pt_writemap failed\n");
		return ENOMEM;
	}

	USE(pr, pr->written = 1;);

	return OK;
}
#define SLOT_FAIL ((vir_bytes) -1)

/*===========================================================================*
 *				region_find_slot_range			     *
 *===========================================================================*/
static vir_bytes region_find_slot_range(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	struct vir_region *lastregion;
	vir_bytes startv = 0;
	int foundflag = 0;
	region_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Length must be reasonable. */
	assert(length > 0);

	/* Special case: allow caller to set maxv to 0 meaning 'I want
	 * it to be mapped in right here.'
	 */
	if(maxv == 0) {
		maxv = minv + length;

		/* Sanity check. */
		if(maxv <= minv) {
			printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
				minv, length);
			return SLOT_FAIL;
		}
	}

	/* Basic input sanity checks. */
	assert(!(length % VM_PAGE_SIZE));
	if(minv >= maxv) {
		printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
			minv, maxv, length);
	}
	assert(minv < maxv);

	if(minv + length > maxv)
		return SLOT_FAIL;

#define FREEVRANGE_TRY(rangestart, rangeend) { \
	vir_bytes frstart = (rangestart), frend = (rangeend); \
	frstart = MAX(frstart, minv); \
	frend = MIN(frend, maxv); \
	if(frend > frstart && (frend - frstart) >= length) { \
		startv = frend-length; \
		foundflag = 1; \
	} }

#define FREEVRANGE(start, end) { \
	assert(!foundflag); \
	FREEVRANGE_TRY(((start)+VM_PAGE_SIZE), ((end)-VM_PAGE_SIZE)); \
	if(!foundflag) { \
		FREEVRANGE_TRY((start), (end)); \
	} }
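/* FREEVRANGE first tries the candidate range with one page shaved off each
 * end, presumably so a fresh allocation leaves a guard gap against its
 * neighbours; only if that fails does it retry with the exact range.
 */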
	/* find region after maxv. */
	region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_GREATER_EQUAL);
	lastregion = region_get_iter(&iter);

	if(!lastregion) {
		/* This is the free virtual address space after the last region. */
		region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_LESS);
		lastregion = region_get_iter(&iter);
		FREEVRANGE(lastregion ?
			lastregion->vaddr+lastregion->length : 0, VM_DATATOP);
	}

	if(!foundflag) {
		struct vir_region *vr;
		while((vr = region_get_iter(&iter)) && !foundflag) {
			struct vir_region *nextvr;
			region_decr_iter(&iter);
			nextvr = region_get_iter(&iter);
			FREEVRANGE(nextvr ? nextvr->vaddr+nextvr->length : 0,
				vr->vaddr);
		}
	}

	if(!foundflag) {
		printf("VM: region_find_slot: no 0x%lx bytes found for %d between 0x%lx and 0x%lx\n",
			length, vmp->vm_endpoint, minv, maxv);
		return SLOT_FAIL;
	}

	/* However we got it, startv must be in the requested range. */
	assert(startv >= minv);
	assert(startv < maxv);
	assert(startv + length <= maxv);

	/* remember this position as a hint for next time. */
	vmp->vm_region_top = startv + length;

	return startv;
}
/*===========================================================================*
 *				region_find_slot			     *
 *===========================================================================*/
static vir_bytes region_find_slot(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	vir_bytes v, hint = vmp->vm_region_top;

	/* use the top of the last inserted region as a minv hint if
	 * possible. remember that a zero maxv is a special case.
	 */
	if(maxv && hint < maxv && hint >= minv) {
		v = region_find_slot_range(vmp, minv, hint, length);

		if(v != SLOT_FAIL)
			return v;
	}

	return region_find_slot_range(vmp, minv, maxv, length);
}
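/* Usage sketch (hypothetical caller): find a one-page slot anywhere below
 * VM_DATATOP:
 *
 *	vir_bytes v = region_find_slot(vmp, 0, VM_DATATOP, VM_PAGE_SIZE);
 *	if(v == SLOT_FAIL)
 *		return ENOMEM;
 */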
struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv, vir_bytes length, int flags)
{
	physr_avl *phavl;
	struct vir_region *newregion;

	if(!(SLABALLOC(newregion))) {
		printf("vm: region_new: could not allocate\n");
		return NULL;
	}

	/* Fill in node details. */
USE(newregion,
	newregion->vaddr = startv;
	newregion->length = length;
	newregion->flags = flags;
	newregion->tag = VRT_NONE;
	newregion->lower = newregion->higher = NULL;
	newregion->parent = vmp;);

	SLABALLOC(phavl);
	if(!phavl) {
		printf("VM: region_new: allocating phys avl failed\n");
		SLABFREE(newregion);
		return NULL;
	}
	USE(newregion, newregion->phys = phavl;);
	physr_init(newregion->phys);

	return newregion;
}
/*===========================================================================*
 *				map_page_region				     *
 *===========================================================================*/
struct vir_region *map_page_region(vmp, minv, maxv, length,
	what, flags, mapflags)
struct vmproc *vmp;
vir_bytes minv;
vir_bytes maxv;
vir_bytes length;
phys_bytes what;
u32_t flags;
int mapflags;
{
	struct vir_region *newregion;
	vir_bytes startv;

	assert(!(length % VM_PAGE_SIZE));

	SANITYCHECK(SCL_FUNCTIONS);

	if((flags & VR_CONTIG) && !(mapflags & MF_PREALLOC)) {
		printf("map_page_region: can't make contiguous allocation without preallocating\n");
		return NULL;
	}

	startv = region_find_slot(vmp, minv, maxv, length);
	if (startv == SLOT_FAIL)
		return NULL;

	/* Now we want a new region. */
	if(!(newregion = region_new(vmp, startv, length, flags))) {
		printf("VM: map_page_region: allocating region failed\n");
		return NULL;
	}

	/* If we know what we're going to map to, map it right away. */
	if(what != MAP_NONE) {
		assert(!(what % VM_PAGE_SIZE));
		assert(!(startv % VM_PAGE_SIZE));
		assert(!(mapflags & MF_PREALLOC));
		if(map_new_physblock(vmp, newregion, 0, length,
			what, PAF_CLEAR, 0) != OK) {
			printf("VM: map_new_physblock failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	if((flags & VR_ANON) && (mapflags & MF_PREALLOC)) {
		if(map_handle_memory(vmp, newregion, 0, length, 1) != OK) {
			printf("VM: map_page_region: prealloc failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	/* Pre-allocations should be uninitialized, but after that it's a
	 * different story.
	 */
	USE(newregion, newregion->flags &= ~VR_UNINITIALIZED;);

	/* Link it. */
	region_insert(&vmp->vm_regions_avl, newregion);

	assert(startv == newregion->vaddr);
	{
		struct vir_region *nextvr;
		if((nextvr = getnextvr(newregion))) {
			assert(newregion->vaddr < nextvr->vaddr);
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return newregion;
}
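/* Usage sketch (hypothetical values): map_page_region is how other VM code
 * creates mappings. A writable anonymous region of one page, placed
 * anywhere and backed by memory immediately, would be requested as:
 *
 *	struct vir_region *vr = map_page_region(vmp, 0, VM_DATATOP,
 *		VM_PAGE_SIZE, MAP_NONE, VR_ANON | VR_WRITABLE, MF_PREALLOC);
 *
 * With what == MAP_NONE and no MF_PREALLOC, pages are instead faulted in
 * lazily via map_pf().
 */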
static struct phys_region *reset_physr_iter(struct vir_region *region,
	physr_iter *iter, vir_bytes offset)
{
	struct phys_region *ph;

	physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
	ph = physr_get_iter(iter);
	assert(ph);
	assert(ph->offset == offset);

	return ph;
}
/*===========================================================================*
 *				map_subfree				     *
 *===========================================================================*/
static int map_subfree(struct vir_region *region,
	vir_bytes start, vir_bytes len)
{
	struct phys_region *pr;
	physr_iter iter;
	vir_bytes end = start+len;
	int full = 0;

	SLABSANE(region);
	SLABSANE(region->phys);
	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		struct phys_region *others;
		struct phys_block *pb;

		pb = pr->ph;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
		}
		physr_incr_iter(&iter);
	}

	if(start == 0 && len == region->length)
		full = 1;

	physr_init_iter(&iter);
	physr_start_iter(region->phys, &iter, start, AVL_GREATER_EQUAL);
	while((pr = physr_get_iter(&iter))) {
		physr_incr_iter(&iter);
		if(pr->offset >= end)
			break;
		pb_unreferenced(region, pr, !full);
		if(!full) {
			physr_start_iter(region->phys, &iter,
				pr->offset, AVL_GREATER_EQUAL);
		}
	}

	if(full)
		physr_init(region->phys);

	return OK;
}
/*===========================================================================*
 *				map_free				     *
 *===========================================================================*/
static int map_free(struct vir_region *region)
{
	int r;

	if((r=map_subfree(region, 0, region->length)) != OK) {
		printf("%d\n", __LINE__);
		return r;
	}

	USE(region,
		SLABFREE(region->phys););
	SLABFREE(region);

	return OK;
}
/*===========================================================================*
 *				yielded_block_cmp			     *
 *===========================================================================*/
int yielded_block_cmp(struct block_id *id1, struct block_id *id2)
{
	if(id1->owner < id2->owner)
		return -1;
	if(id1->owner > id2->owner)
		return 1;
	return cmp64(id1->id, id2->id);
}
/*===========================================================================*
 *				free_yielded_proc			     *
 *===========================================================================*/
static vir_bytes free_yielded_proc(struct vmproc *vmp)
{
	vir_bytes total = 0;
	int h;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Free associated regions. */
	for(h = 0; h < YIELD_HASHSIZE && vmp->vm_yielded > 0; h++) {
		yielded_t *yb;
		yielded_t *next_yb;
		yielded_iter iter;
		yielded_avl *avl = &vm_yielded_blocks[h];
		yielded_start_iter_least(avl, &iter);
		while((yb = yielded_get_iter(&iter))) {
			SLABSANE(yb);
			yielded_incr_iter(&iter);
			if(yb->id.owner != vmp->vm_endpoint)
				continue;
			next_yb = yielded_get_iter(&iter);
			total += freeyieldednode(yb, 1);
			/* the above removal invalidated our iter; restart it
			 * for the node we want to start at.
			 */
			if(!next_yb) break;
			yielded_start_iter(avl, &iter, next_yb->id, AVL_EQUAL);
			assert(yielded_get_iter(&iter) == next_yb);
		}
	}

	return total;
}
static phys_bytes freeyieldednode(yielded_t *node, int freemem)
{
	yielded_t *older, *younger, *removed;
	yielded_avl *avl;
	phys_bytes freed;
	int p;

	SLABSANE(node);

	LRUCHECK;

	/* Update LRU. */

	younger = node->younger;
	older = node->older;

	if(younger) {
		SLABSANE(younger);
		assert(younger->older == node);
		USE(younger, younger->older = node->older;);
	} else {
		assert(node == lru_youngest);
		lru_youngest = node->older;
	}

	if(older) {
		SLABSANE(older);
		assert(older->younger == node);
		USE(older, older->younger = node->younger;);
	} else {
		assert(node == lru_oldest);
		lru_oldest = node->younger;
	}

	LRUCHECK;

	/* Update AVL. */

	if(vm_isokendpt(node->id.owner, &p) != OK)
		panic("out of date owner of yielded block %d", node->id.owner);
	avl = get_yielded_avl(node->id);
	removed = yielded_remove(avl, node->id);
	assert(removed == node);
	assert(vmproc[p].vm_yielded > 0);
	vmproc[p].vm_yielded--;

	/* Free associated memory if requested. */

	if(freemem)
		free_mem(ABS2CLICK(node->physaddr), node->pages);

	/* Free the node itself. */
	freed = VM_PAGE_SIZE * node->pages;
	SLABFREE(node);

	return freed;
}
/*========================================================================*
 *				free_yielded				  *
 *========================================================================*/
vir_bytes free_yielded(vir_bytes max_bytes)
{
/* PRIVATE yielded_t *lru_youngest = NULL, *lru_oldest = NULL; */
	vir_bytes freed = 0;

	while(freed < max_bytes && lru_oldest) {
		SLABSANE(lru_oldest);
		freed += freeyieldednode(lru_oldest, 1);
	}

	return freed;
}
/*========================================================================*
 *				map_free_proc				  *
 *========================================================================*/
int map_free_proc(vmp)
struct vmproc *vmp;
{
	struct vir_region *r;

	while((r = region_search_root(&vmp->vm_regions_avl))) {
		SANITYCHECK(SCL_DETAIL);
		region_remove(&vmp->vm_regions_avl, r->vaddr); /* For sanity checks. */
		map_free(r);
		SANITYCHECK(SCL_DETAIL);
	}

	region_init(&vmp->vm_regions_avl);

	/* Free associated yielded blocks. */
	free_yielded_proc(vmp);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*===========================================================================*
 *				map_lookup				     *
 *===========================================================================*/
struct vir_region *map_lookup(vmp, offset, physr)
struct vmproc *vmp;
vir_bytes offset;
struct phys_region **physr;
{
	struct vir_region *r;

	SANITYCHECK(SCL_FUNCTIONS);

	if(!region_search_root(&vmp->vm_regions_avl))
		panic("process has no regions: %d", vmp->vm_endpoint);

	if((r = region_search(&vmp->vm_regions_avl, offset, AVL_LESS_EQUAL))) {
		vir_bytes ph;
		if(offset >= r->vaddr && offset < r->vaddr + r->length) {
			ph = offset - r->vaddr;
			if(physr) {
				*physr = physr_search(r->phys, ph, AVL_EQUAL);
				if(*physr) assert((*physr)->offset == ph);
			}
			return r;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return NULL;
}
static u32_t vrallocflags(u32_t flags)
{
	u32_t allocflags = 0;

	if(flags & VR_PHYS64K)
		allocflags |= PAF_ALIGN64K;
	if(flags & VR_LOWER16MB)
		allocflags |= PAF_LOWER16MB;
	if(flags & VR_LOWER1MB)
		allocflags |= PAF_LOWER1MB;
	if(flags & VR_CONTIG)
		allocflags |= PAF_CONTIG;

	return allocflags;
}
/*===========================================================================*
 *				map_new_physblock			     *
 *===========================================================================*/
static int map_new_physblock(vmp, region, start_offset, length,
	what_mem, allocflags, written)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes start_offset;
vir_bytes length;
phys_bytes what_mem;
u32_t allocflags;
int written;
{
	struct memlist *memlist, *ml;
	int r = OK;
	vir_bytes mapped = 0;
	vir_bytes offset = start_offset;

	SANITYCHECK(SCL_FUNCTIONS);

	assert(!(length % VM_PAGE_SIZE));

	if((region->flags & VR_CONTIG) &&
		(start_offset > 0 || length < region->length)) {
		printf("VM: region length 0x%lx, offset 0x%lx length 0x%lx\n",
			region->length, start_offset, length);
		printf("VM: map_new_physblock: non-full contig allocation requested\n");
		return EFAULT;
	}

	/* Memory for new physical block. */
	allocflags |= vrallocflags(region->flags);

	if(allocflags & PAF_CONTIG) {
		assert(what_mem == MAP_NONE);
		if((what_mem = alloc_mem(length/VM_PAGE_SIZE, allocflags)) == NO_MEM) {
			return ENOMEM;
		}
		what_mem = CLICK2ABS(what_mem);
		allocflags &= ~PAF_CONTIG;
		assert(what_mem != MAP_NONE);
	}

	if(!(memlist = alloc_mem_in_list(length, allocflags, what_mem))) {
		printf("map_new_physblock: couldn't allocate\n");
		return ENOMEM;
	}

	for(ml = memlist; ml; ml = ml->next) {
		struct phys_region *newphysr = NULL;
		struct phys_block *newpb = NULL;

		/* Allocate things necessary for this chunk of memory. */
		if(!(newpb = pb_new(ml->phys)) ||
		   !(newphysr = pb_reference(newpb, offset, region))) {
			printf("map_new_physblock: no memory for the ph slabs\n");
			if(newpb) SLABFREE(newpb);
			r = ENOMEM;
			break;
		}

		/* Update pagetable. */
		if(map_ph_writept(vmp, region, newphysr) != OK) {
			printf("map_new_physblock: map_ph_writept failed\n");
			r = ENOMEM;
			break;
		}

		offset += VM_PAGE_SIZE;
		mapped += VM_PAGE_SIZE;
	}

	if(r != OK) {
		offset = start_offset;
		/* Things did not go well. Undo everything. */
		for(ml = memlist; ml; ml = ml->next) {
			struct phys_region *physr;
			if((physr = physr_search(region->phys, offset,
				AVL_EQUAL))) {
				assert(physr->ph->refcount == 1);
				pb_unreferenced(region, physr, 1);
			}
			offset += VM_PAGE_SIZE;
		}
	} else assert(mapped == length);

	/* Always clean up the memlist itself, even if everything
	 * worked we're not using the memlist nodes any more. And
	 * the memory they reference is either freed above or in use.
	 */
	free_mem_list(memlist, 0);

	SANITYCHECK(SCL_FUNCTIONS);

	return r;
}
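/* map_new_physblock allocates through alloc_mem_in_list(), which may return
 * the requested length as several physically scattered chunks; each chunk
 * gets its own phys_block/phys_region pair. On any failure the loop above
 * rolls back every block it already mapped, so the region is never left
 * partially accounted.
 */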
/*===========================================================================*
 *				map_clone_ph_block			     *
 *===========================================================================*/
static struct phys_region *map_clone_ph_block(vmp, region, ph, iter)
struct vmproc *vmp;
struct vir_region *region;
struct phys_region *ph;
physr_iter *iter;
{
	vir_bytes offset;
	u32_t allocflags;
	phys_bytes physaddr;
	struct phys_region *newpr;
	int region_has_single_block;
	int written;

	written = ph->written;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Warning: this function will free the passed
	 * phys_region *ph and replace it (in the same offset)
	 * with another! So both the pointer to it
	 * and any iterators over the phys_regions in the vir_region
	 * will be invalid on successful return. (Iterators over
	 * the vir_region could be invalid on unsuccessful return too.)
	 */

	/* This is only to be done if there is more than one copy. */
	assert(ph->ph->refcount > 1);

	/* This function takes a physical block, copies its contents
	 * into newly allocated memory, and replaces the single physical
	 * block by one or more physical blocks with refcount 1 with the
	 * same contents as the original. In other words, a fragmentable
	 * version of map_copy_ph_block().
	 */

	/* Remember where and how much. */
	offset = ph->offset;
	physaddr = ph->ph->phys;

	/* Now unlink the original physical block so we can replace
	 * it with new ones.
	 */

	assert(ph->ph->refcount > 1);
	pb_unreferenced(region, ph, 1);
	assert(ph->ph->refcount >= 1);

	SANITYCHECK(SCL_DETAIL);

	/* Put new free memory in. */
	allocflags = vrallocflags(region->flags);
	region_has_single_block = (offset == 0 && region->length == VM_PAGE_SIZE);
	assert(region_has_single_block || !(allocflags & PAF_CONTIG));
	assert(!(allocflags & PAF_CLEAR));

	if(map_new_physblock(vmp, region, offset, VM_PAGE_SIZE,
		MAP_NONE, allocflags, written) != OK) {
		/* XXX original range now gone. */
		printf("VM: map_clone_ph_block: map_new_physblock failed.\n");
		return NULL;
	}

	/* Copy the block to the new memory.
	 * Can only fail if map_new_physblock didn't do what we asked.
	 */
	if(copy_abs2region(physaddr, region, offset, VM_PAGE_SIZE) != OK)
		panic("copy_abs2region failed, no good reason for that");

	newpr = physr_search(region->phys, offset, AVL_EQUAL);
	assert(newpr);
	assert(newpr->offset == offset);

	if(iter) {
		physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
		assert(physr_get_iter(iter) == newpr);
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return newpr;
}
/*===========================================================================*
 *				map_pf					     *
 *===========================================================================*/
int map_pf(vmp, region, offset, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset;
int write;
{
	vir_bytes virpage;
	struct phys_region *ph;
	int r = OK;

	assert(offset >= 0);
	assert(offset < region->length);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));

	virpage = offset - offset % VM_PAGE_SIZE;

	SANITYCHECK(SCL_FUNCTIONS);

	if((ph = physr_search(region->phys, offset, AVL_LESS_EQUAL)) &&
	   (ph->offset <= offset && offset < ph->offset + VM_PAGE_SIZE)) {
		/* Pagefault in existing block. Do copy-on-write. */
		assert(write);
		assert(region->flags & VR_WRITABLE);
		assert(ph->ph->refcount > 0);

		if(WRITABLE(region, ph->ph)) {
			r = map_ph_writept(vmp, region, ph);
			if(r != OK)
				printf("map_ph_writept failed\n");
		} else {
			if(ph->ph->refcount > 0
				&& ph->ph->share_flag != PBSH_COW) {
				printf("VM: write RO mapped pages.\n");
				return EFAULT;
			} else {
				if(!map_clone_ph_block(vmp, region, ph, NULL))
					r = ENOMEM;
			}
		}
	} else {
		/* Pagefault in non-existing block. Map in new block. */
		if(map_new_physblock(vmp, region, virpage,
			VM_PAGE_SIZE, MAP_NONE, PAF_CLEAR, 0) != OK) {
			printf("map_new_physblock failed\n");
			r = ENOMEM;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(r != OK) {
		printf("VM: map_pf: failed (%d)\n", r);
		return r;
	}

	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+virpage,
		VM_PAGE_SIZE, write)) {
		panic("map_pf: pt_checkrange failed: %d", r);
	}

	return r;
}
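/* Fault-handling summary: a write fault on a shared copy-on-write block is
 * resolved by map_clone_ph_block() (private copy), a fault on a hole by
 * map_new_physblock() with PAF_CLEAR (fresh zeroed page), and a fault on a
 * block that is already safely writable just (re)writes the page table
 * entry via map_ph_writept().
 */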
/*===========================================================================*
 *				map_pin_memory				     *
 *===========================================================================*/
int map_pin_memory(struct vmproc *vmp)
{
	struct vir_region *vr;
	int r;
	region_iter iter;

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	/* Scan all memory regions. */
	while((vr = region_get_iter(&iter))) {
		/* Make sure region is mapped to physical memory and writable.*/
		r = map_handle_memory(vmp, vr, 0, vr->length, 1);
		if(r != OK) {
			panic("map_pin_memory: map_handle_memory failed: %d", r);
		}
		region_incr_iter(&iter);
	}

	return OK;
}
/*===========================================================================*
 *				map_handle_memory			     *
 *===========================================================================*/
int map_handle_memory(vmp, region, offset, length, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset, length;
int write;
{
	struct phys_region *physr, *nextphysr;
	int changes = 0;
	physr_iter iter;
	u32_t allocflags = 0;

	if(!(region->flags & VR_UNINITIALIZED)) {
		allocflags = PAF_CLEAR;
	}

#define FREE_RANGE_HERE(er1, er2) {					\
	struct phys_region *r1 = (er1), *r2 = (er2);			\
	vir_bytes start = offset, end = offset + length;		\
	if(r1) {							\
		start = MAX(start, r1->offset + VM_PAGE_SIZE); }	\
	if(r2) {							\
		end = MIN(end, r2->offset); }				\
	if(start < end) {						\
		SANITYCHECK(SCL_DETAIL);				\
		if(map_new_physblock(vmp, region, start,		\
			end-start, MAP_NONE, allocflags, 0) != OK) {	\
			SANITYCHECK(SCL_DETAIL);			\
			return ENOMEM;					\
		}							\
		changes++;						\
	} }
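/* FREE_RANGE_HERE maps fresh memory into the gap between two existing
 * phys_regions (either end may be NULL, meaning the start or end of the
 * requested window). allocflags carries PAF_CLEAR unless the region is
 * still VR_UNINITIALIZED, so holes in initialized regions are zero-filled.
 */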
	SANITYCHECK(SCL_FUNCTIONS);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));
	assert(!(offset % VM_PAGE_SIZE));
	assert(!(length % VM_PAGE_SIZE));
	assert(!write || (region->flags & VR_WRITABLE));

	physr_start_iter(region->phys, &iter, offset, AVL_LESS_EQUAL);
	physr = physr_get_iter(&iter);

	if(!physr) {
		physr_start_iter(region->phys, &iter, offset, AVL_GREATER_EQUAL);
		physr = physr_get_iter(&iter);
	}

	FREE_RANGE_HERE(NULL, physr);

	if(physr) {
		physr = reset_physr_iter(region, &iter, physr->offset);
		if(physr->offset + VM_PAGE_SIZE <= offset) {
			physr_incr_iter(&iter);
			physr = physr_get_iter(&iter);

			FREE_RANGE_HERE(NULL, physr);
			if(physr) {
				physr = reset_physr_iter(region, &iter,
					physr->offset);
			}
		}
	}

	while(physr) {
		int r;

		SANITYCHECK(SCL_DETAIL);

		if(write) {
			assert(physr->ph->refcount > 0);
			if(!WRITABLE(region, physr->ph)) {
				if(!(physr = map_clone_ph_block(vmp, region,
					physr, &iter))) {
					printf("VM: map_handle_memory: no copy\n");
					return ENOMEM;
				}
				changes++;
			} else {
				SANITYCHECK(SCL_DETAIL);
				if((r=map_ph_writept(vmp, region, physr)) != OK) {
					printf("VM: map_ph_writept failed\n");
					return r;
				}
				changes++;
				SANITYCHECK(SCL_DETAIL);
			}
		}

		SANITYCHECK(SCL_DETAIL);
		physr_incr_iter(&iter);
		nextphysr = physr_get_iter(&iter);
		FREE_RANGE_HERE(physr, nextphysr);
		SANITYCHECK(SCL_DETAIL);
		if(nextphysr) {
			if(nextphysr->offset >= offset + length)
				break;
			nextphysr = reset_physr_iter(region, &iter,
				nextphysr->offset);
		}
		physr = nextphysr;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(changes < 1) {
		printf("region start at 0x%lx offset 0x%lx len 0x%lx write %d\n",
			region->vaddr, offset, length, write);
		printf("no changes in map_handle_memory\n");
		return ENOMEM;
	}

	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset, length, write)) {
		printf("handle mem 0x%lx-0x%lx failed\n",
			region->vaddr+offset,region->vaddr+offset+length);
		map_printregion(vmp, region);
		panic("checkrange failed");
	}

	return OK;
}
static int count_phys_regions(struct vir_region *vr)
{
	int n = 0;
	struct phys_region *ph;
	physr_iter iter;
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		n++;
		physr_incr_iter(&iter);
	}
	return n;
}
/*===========================================================================*
 *				map_copy_region				     *
 *===========================================================================*/
static struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region *vr)
{
	/* map_copy_region creates a complete copy of the vir_region
	 * data structure, linking in the same phys_blocks directly,
	 * but all in limbo, i.e., the caller has to link the vir_region
	 * to a process. Therefore it doesn't increase the refcount in
	 * the phys_block; the caller has to do this once it's linked.
	 * The reason for this is to keep the sanity checks working
	 * within this function.
	 */
	struct vir_region *newvr;
	struct phys_region *ph;
	physr_iter iter;
	int cr;

	cr = count_phys_regions(vr);

	if(!(newvr = region_new(vr->parent, vr->vaddr, vr->length, vr->flags)))
		return NULL;

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		struct phys_region *newph = pb_reference(ph->ph, ph->offset, newvr);

		if(!newph) { map_free(newvr); return NULL; }

		USE(newph, newph->written = 0;);
		assert(count_phys_regions(vr) == cr);
		physr_incr_iter(&iter);
	}

	assert(count_phys_regions(vr) == count_phys_regions(newvr));

	return newvr;
}
/*===========================================================================*
 *				copy_abs2region				     *
 *===========================================================================*/
int copy_abs2region(phys_bytes abs, struct vir_region *destregion,
	phys_bytes offset, phys_bytes len)
{
	assert(destregion);
	assert(destregion->phys);
	while(len > 0) {
		phys_bytes sublen, suboffset;
		struct phys_region *ph;
		assert(destregion);
		assert(destregion->phys);
		if(!(ph = physr_search(destregion->phys, offset, AVL_LESS_EQUAL))) {
			printf("VM: copy_abs2region: no phys region found (1).\n");
			return EFAULT;
		}
		assert(ph->offset <= offset);
		if(ph->offset+VM_PAGE_SIZE <= offset) {
			printf("VM: copy_abs2region: no phys region found (2).\n");
			return EFAULT;
		}
		suboffset = offset - ph->offset;
		assert(suboffset < VM_PAGE_SIZE);
		sublen = len;
		if(sublen > VM_PAGE_SIZE - suboffset)
			sublen = VM_PAGE_SIZE - suboffset;
		assert(suboffset + sublen <= VM_PAGE_SIZE);
		if(ph->ph->refcount != 1) {
			printf("VM: copy_abs2region: refcount not 1.\n");
			return EFAULT;
		}

		if(sys_abscopy(abs, ph->ph->phys + suboffset, sublen) != OK) {
			printf("VM: copy_abs2region: abscopy failed.\n");
			return EFAULT;
		}
		abs += sublen;
		offset += sublen;
		len -= sublen;
	}

	return OK;
}
/*=========================================================================*
 *				map_writept				     *
 *=========================================================================*/
int map_writept(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *ph;
	int r;
	region_iter v_iter;
	physr_iter ph_iter;

	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		physr_start_iter_least(vr->phys, &ph_iter);

		while((ph = physr_get_iter(&ph_iter))) {
			physr_incr_iter(&ph_iter);

			/* If this phys block is shared as SMAP, then do
			 * not update the page table. */
			if(ph->ph->refcount > 1
				&& ph->ph->share_flag == PBSH_SMAP) {
				continue;
			}

			if((r=map_ph_writept(vmp, vr, ph)) != OK) {
				printf("VM: map_writept: failed\n");
				return r;
			}
		}
		region_incr_iter(&v_iter);
	}

	return OK;
}
/*========================================================================*
 *				map_proc_copy				  *
 *========================================================================*/
int map_proc_copy(dst, src)
struct vmproc *dst;
struct vmproc *src;
{
/* Copy all the memory regions from the src process to the dst process. */
	region_init(&dst->vm_regions_avl);

	return map_proc_copy_from(dst, src, NULL);
}
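/* Fork-time note: map_proc_copy_from() links the child's new phys_regions
 * to the parent's phys_blocks and marks the now-shared blocks PBSH_COW;
 * both page tables are then rewritten so the shared pages are mapped
 * read-only and fault into map_pf() on the next write.
 */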
/*========================================================================*
 *				map_proc_copy_from			  *
 *========================================================================*/
int map_proc_copy_from(dst, src, start_src_vr)
struct vmproc *dst;
struct vmproc *src;
struct vir_region *start_src_vr;
{
	struct vir_region *vr;
	region_iter v_iter;

	if(!start_src_vr)
		start_src_vr = region_search_least(&src->vm_regions_avl);

	assert(start_src_vr);
	assert(start_src_vr->parent == src);
	region_start_iter(&src->vm_regions_avl, &v_iter,
		start_src_vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter) == start_src_vr);

	/* Copy source regions after the destination's last region (if any). */

	SANITYCHECK(SCL_FUNCTIONS);

	while((vr = region_get_iter(&v_iter))) {
		physr_iter iter_orig, iter_new;
		struct vir_region *newvr;
		struct phys_region *orig_ph, *new_ph;
		if(!(newvr = map_copy_region(dst, vr))) {
			map_free_proc(dst);
			return ENOMEM;
		}
		USE(newvr, newvr->parent = dst;);
		region_insert(&dst->vm_regions_avl, newvr);
		physr_start_iter_least(vr->phys, &iter_orig);
		physr_start_iter_least(newvr->phys, &iter_new);
		while((orig_ph = physr_get_iter(&iter_orig))) {
			struct phys_block *pb;
			new_ph = physr_get_iter(&iter_new);
			/* Check two physregions both are nonnull,
			 * are different, and match physblocks.
			 */
			assert(new_ph);
			assert(orig_ph);
			assert(orig_ph != new_ph);
			pb = orig_ph->ph;
			assert(orig_ph->ph == new_ph->ph);

			/* If the phys block has been shared as SMAP,
			 * do the regular copy. */
			if(pb->refcount > 2 && pb->share_flag == PBSH_SMAP) {
				map_clone_ph_block(dst, newvr,new_ph,
					&iter_new);
			} else {
				USE(pb, pb->share_flag = PBSH_COW;);
			}

			/* Get next new physregion */
			physr_incr_iter(&iter_orig);
			physr_incr_iter(&iter_new);
		}
		assert(!physr_get_iter(&iter_new));
		region_incr_iter(&v_iter);
	}

	map_writept(src);
	map_writept(dst);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
int map_region_extend_upto_v(struct vmproc *vmp, vir_bytes v)
{
	vir_bytes offset = v, end;
	struct vir_region *vr, *nextvr;
	int r;

	if(!(vr = region_search(&vmp->vm_regions_avl, offset, AVL_LESS))) {
		printf("VM: nothing to extend\n");
		return ENOMEM;
	}

	if(!(vr->flags & VR_ANON)) {
		printf("VM: memory range to extend not anonymous\n");
		return ENOMEM;
	}

	assert(vr->vaddr <= offset);
	if((nextvr = getnextvr(vr))) {
		assert(offset <= nextvr->vaddr);
	}

	end = vr->vaddr + vr->length;

	offset = roundup(offset, VM_PAGE_SIZE);

	r = map_region_extend(vmp, vr, offset - end);

	return r;
}
/*========================================================================*
 *				map_region_extend			  *
 *========================================================================*/
int map_region_extend(struct vmproc *vmp, struct vir_region *vr,
	vir_bytes delta)
{
	vir_bytes end;
	struct vir_region *nextvr;

	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));
	if(vr->flags & VR_CONTIG) {
		printf("VM: can't grow contig region\n");
		return EFAULT;
	}

	if(!delta) return OK;
	end = vr->vaddr + vr->length;
	assert(end >= vr->vaddr);

	if(end + delta <= end) {
		printf("VM: strange delta 0x%lx\n", delta);
		return ENOMEM;
	}

	nextvr = getnextvr(vr);

	if(!nextvr || end + delta <= nextvr->vaddr) {
		USE(vr, vr->length += delta;);
		return OK;
	}

	return ENOMEM;
}
/*========================================================================*
 *				map_region_shrink			  *
 *========================================================================*/
int map_region_shrink(struct vir_region *vr, vir_bytes delta)
{
	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));

	printf("VM: ignoring region shrink\n");

	return OK;
}
struct vir_region *map_region_lookup_tag(vmp, tag)
struct vmproc *vmp;
u32_t tag;
{
	struct vir_region *vr;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		if(vr->tag == tag)
			return vr;
		region_incr_iter(&v_iter);
	}

	return NULL;
}
void map_region_set_tag(struct vir_region *vr, u32_t tag)
{
	USE(vr, vr->tag = tag;);
}

u32_t map_region_get_tag(struct vir_region *vr)
{
	return vr->tag;
}
/*========================================================================*
 *				map_unmap_region			  *
 *========================================================================*/
int map_unmap_region(struct vmproc *vmp, struct vir_region *r,
	vir_bytes offset, vir_bytes len)
{
/* Shrink the region by 'len' bytes, from the start. Unreference
 * memory it used to reference if any.
 */
	vir_bytes regionstart;

	SANITYCHECK(SCL_FUNCTIONS);

	if(offset+len > r->length || (len % VM_PAGE_SIZE)) {
		printf("VM: bogus length 0x%lx\n", len);
		return EINVAL;
	}

	if(!(r->flags & (VR_ANON|VR_DIRECT))) {
		printf("VM: only unmap anonymous or direct memory\n");
		return EINVAL;
	}

	regionstart = r->vaddr + offset;

	/* unreference its memory */
	map_subfree(r, offset, len);

	/* if unmap was at start/end of this region, it actually shrinks */
	if(offset == 0) {
		struct phys_region *pr;
		physr_iter iter;

		region_remove(&vmp->vm_regions_avl, r->vaddr);

		USE(r,
		r->vaddr += len;
		r->length -= len;);

		region_insert(&vmp->vm_regions_avl, r);

		/* vaddr has increased; to make all the phys_regions
		 * point to the same addresses, make them shrink by the
		 * same amount.
		 */
		physr_init_iter(&iter);
		physr_start_iter(r->phys, &iter, offset, AVL_GREATER_EQUAL);

		while((pr = physr_get_iter(&iter))) {
			assert(pr->offset >= offset);
			USE(pr, pr->offset -= len;);
			physr_incr_iter(&iter);
		}
	} else if(offset + len == r->length) {
		assert(len <= r->length);
		USE(r, r->length -= len;);
	}

	if(r->length == 0) {
		/* Whole region disappears. Unlink and free it. */
		region_remove(&vmp->vm_regions_avl, r->vaddr);
		map_free(r);
	}

	SANITYCHECK(SCL_DETAIL);

	if(pt_writemap(vmp, &vmp->vm_pt, regionstart,
		MAP_NONE, len, 0, WMF_OVERWRITE) != OK) {
		printf("VM: map_unmap_region: pt_writemap failed\n");
		return ENOMEM;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*========================================================================*
 *				map_remap				  *
 *========================================================================*/
int map_remap(struct vmproc *dvmp, vir_bytes da, size_t size,
	struct vir_region *region, vir_bytes *r, int readonly)
{
	struct vir_region *vr;
	struct phys_region *ph;
	vir_bytes startv, dst_addr;
	physr_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	assert(region->flags & VR_SHARED);

	/* da is handled differently */
	if (!da)
		dst_addr = 0;
	else
		dst_addr = da;

	/* round up to page size */
	assert(!(size % VM_PAGE_SIZE));
	startv = region_find_slot(dvmp, dst_addr, VM_DATATOP, size);
	if (startv == SLOT_FAIL) {
		return ENOMEM;
	}
	/* when the user specifies the address, we cannot change it */
	if (da && (startv != dst_addr))
		return EINVAL;

	vr = map_copy_region(dvmp, region);
	if(!vr)
		return ENOMEM;

	USE(vr,
	vr->vaddr = startv;
	vr->length = size;
	vr->flags = region->flags;
	vr->tag = VRT_NONE;
	vr->parent = dvmp;
	if(readonly) {
		vr->flags &= ~VR_WRITABLE;
	});

	assert(vr->flags & VR_SHARED);

	region_insert(&dvmp->vm_regions_avl, vr);

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		if(map_ph_writept(dvmp, vr, ph) != OK) {
			panic("map_remap: map_ph_writept failed");
		}
		physr_incr_iter(&iter);
	}

	*r = startv;

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*========================================================================*
 *				map_get_phys				  *
 *========================================================================*/
int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
{
	struct vir_region *vr;
	struct phys_region *ph;
	physr_iter iter;

	if (!(vr = map_lookup(vmp, addr, NULL)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!(vr->flags & VR_SHARED))
		return EINVAL;

	physr_start_iter_least(vr->phys, &iter);
	ph = physr_get_iter(&iter);

	assert(ph);
	assert(ph->ph);
	if (r)
		*r = ph->ph->phys;

	return OK;
}
/*========================================================================*
 *				map_get_ref				  *
 *========================================================================*/
int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
{
	struct vir_region *vr;
	struct phys_region *ph;
	physr_iter iter;

	if (!(vr = map_lookup(vmp, addr, NULL)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!(vr->flags & VR_SHARED))
		return EINVAL;

	physr_start_iter_least(vr->phys, &iter);
	ph = physr_get_iter(&iter);

	assert(ph);
	assert(ph->ph);
	if (cnt)
		*cnt = ph->ph->refcount;

	return OK;
}
/*========================================================================*
 *				get_stats_info				  *
 *========================================================================*/
void get_stats_info(struct vm_stats_info *vsi)
{
	yielded_t *yb;

	vsi->vsi_cached = 0L;

	for(yb = lru_youngest; yb; yb = yb->older)
		vsi->vsi_cached++;
}
void get_usage_info_kernel(struct vm_usage_info *vui)
{
	memset(vui, 0, sizeof(*vui));
	vui->vui_total = kernel_boot_info.kernel_allocated_bytes;
}

static void get_usage_info_vm(struct vm_usage_info *vui)
{
	memset(vui, 0, sizeof(*vui));
	vui->vui_total = kernel_boot_info.vm_allocated_bytes +
		get_vm_self_pages() * VM_PAGE_SIZE;
}
/*========================================================================*
 *				get_usage_info				  *
 *========================================================================*/
void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
{
	struct vir_region *vr;
	physr_iter iter;
	struct phys_region *ph;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	memset(vui, 0, sizeof(*vui));

	if(vmp->vm_endpoint == VM_PROC_NR) {
		get_usage_info_vm(vui);
		return;
	}

	if(vmp->vm_endpoint < 0) {
		get_usage_info_kernel(vui);
		return;
	}

	while((vr = region_get_iter(&v_iter))) {
		physr_start_iter_least(vr->phys, &iter);
		while((ph = physr_get_iter(&iter))) {
			/* All present pages are counted towards the total. */
			vui->vui_total += VM_PAGE_SIZE;

			if (ph->ph->refcount > 1) {
				/* Any page with a refcount > 1 is common. */
				vui->vui_common += VM_PAGE_SIZE;

				/* Any common, non-COW page is shared. */
				if (vr->flags & VR_SHARED ||
					ph->ph->share_flag == PBSH_SMAP)
					vui->vui_shared += VM_PAGE_SIZE;
			}
			physr_incr_iter(&iter);
		}
		region_incr_iter(&v_iter);
	}
}
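/* Accounting sketch: vui_total counts every present page once, vui_common
 * counts pages whose physical block is referenced by more than one
 * phys_region (COW or shared), and vui_shared is the subset of those that
 * are deliberately shared (VR_SHARED regions or PBSH_SMAP blocks) rather
 * than pending copy-on-write.
 */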
/*===========================================================================*
 *				get_region_info				     *
 *===========================================================================*/
int get_region_info(struct vmproc *vmp, struct vm_region_info *vri,
	int max, vir_bytes *nextp)
{
	struct vir_region *vr;
	vir_bytes next;
	int count;
	region_iter v_iter;

	next = *nextp;

	if (!max) return 0;

	region_start_iter(&vmp->vm_regions_avl, &v_iter, next, AVL_GREATER_EQUAL);
	if(!(vr = region_get_iter(&v_iter))) return 0;

	for(count = 0; (vr = region_get_iter(&v_iter)) && count < max; count++, vri++) {
		struct phys_region *ph1, *ph2;

		/* Report part of the region that's actually in use. */

		/* Get first and last phys_regions, if any */
		ph1 = physr_search_least(vr->phys);
		ph2 = physr_search_greatest(vr->phys);
		if(!ph1 || !ph2) { assert(!ph1 && !ph2); continue; }

		/* Report start+length of region starting from lowest use. */
		vri->vri_addr = vr->vaddr + ph1->offset;
		vri->vri_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
		vri->vri_length = ph2->offset + VM_PAGE_SIZE - ph1->offset;

		/* "AND" the provided protection with per-page protection. */
		if (!(vr->flags & VR_WRITABLE))
			vri->vri_prot &= ~PROT_WRITE;

		vri->vri_flags = (vr->flags & VR_SHARED) ? MAP_IPC_SHARED : 0;

		next = vr->vaddr + vr->length;
		region_incr_iter(&v_iter);
	}

	*nextp = next;
	return count;
}
/*========================================================================*
 *				regionprintstats			  *
 *========================================================================*/
void printregionstats(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *pr;
	physr_iter iter;
	region_iter v_iter;
	vir_bytes used = 0, weighted = 0;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		region_incr_iter(&v_iter);
		if(vr->flags & VR_DIRECT)
			continue;
		physr_start_iter_least(vr->phys, &iter);
		while((pr = physr_get_iter(&iter))) {
			physr_incr_iter(&iter);
			used += VM_PAGE_SIZE;
			weighted += VM_PAGE_SIZE / pr->ph->refcount;
		}
	}

	printf("%6lukB %6lukB\n", used/1024, weighted/1024);
}
/*===========================================================================*
 *				do_map_memory				     *
 *===========================================================================*/
static int do_map_memory(struct vmproc *vms, struct vmproc *vmd,
	struct vir_region *vrs, struct vir_region *vrd,
	vir_bytes offset_s, vir_bytes offset_d,
	vir_bytes length, int flag)
{
	struct phys_region *prs;
	struct phys_region *newphysr;
	struct phys_block *pb;
	physr_iter iter;
	u32_t pt_flag = PTF_PRESENT | PTF_USER;
	vir_bytes end;

	/* Search for the first phys region in the source process. */
	physr_start_iter(vrs->phys, &iter, offset_s, AVL_EQUAL);
	prs = physr_get_iter(&iter);
	if(!prs)
		panic("do_map_memory: no aligned phys region: %d", 0);

	/* flag: 0 -> read-only
	 *       1 -> writable
	 *      -1 -> share as COW, so read-only
	 */
	if(flag > 0)
		pt_flag |= PTF_WRITE;
	else
		pt_flag |= PTF_READ;

	/* Map phys blocks in the source process to the destination process. */
	end = offset_d + length;
	while((prs = physr_get_iter(&iter)) && offset_d < end) {
		/* If a SMAP share was requested but the phys block has already
		 * been shared as COW, copy the block for the source phys region
		 * first.
		 */
		pb = prs->ph;
		if(flag >= 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_COW) {
			if(!(prs = map_clone_ph_block(vms, vrs, prs, &iter)))
				return ENOMEM;
			pb = prs->ph;
		}

		/* Allocate a new phys region. */
		if(!(newphysr = pb_reference(pb, offset_d, vrd)))
			return ENOMEM;

		/* If a COW share was requested but the phys block has already
		 * been shared as SMAP, give up on COW and copy the block for
		 * the destination phys region now.
		 */
		if(flag < 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_SMAP) {
			if(!(newphysr = map_clone_ph_block(vmd, vrd,
				newphysr, NULL))) {
				return ENOMEM;
			}
		}
		else {
			/* See if this is a COW share or SMAP share. */
			if(flag < 0) {			/* COW share */
				pb->share_flag = PBSH_COW;
				/* Update the page table for the src process. */
				pt_writemap(vms, &vms->vm_pt, offset_s + vrs->vaddr,
					pb->phys, VM_PAGE_SIZE,
					pt_flag, WMF_OVERWRITE);
			}
			else {				/* SMAP share */
				pb->share_flag = PBSH_SMAP;
			}
			/* Update the page table for the destination process. */
			pt_writemap(vmd, &vmd->vm_pt, offset_d + vrd->vaddr,
				pb->phys, VM_PAGE_SIZE, pt_flag, WMF_OVERWRITE);
		}

		physr_incr_iter(&iter);
		offset_d += VM_PAGE_SIZE;
		offset_s += VM_PAGE_SIZE;
	}

	return OK;
}
/*===========================================================================*
 *				unmap_memory				     *
 *===========================================================================*/
int unmap_memory(endpoint_t sour, endpoint_t dest,
	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
{
	struct vmproc *vmd;
	struct vir_region *vrd;
	struct phys_region *pr;
	struct phys_block *pb;
	physr_iter iter;
	vir_bytes off, end;
	int p;

	/* Use information on the destination process to unmap. */
	if(vm_isokendpt(dest, &p) != OK)
		panic("unmap_memory: bad endpoint: %d", dest);
	vmd = &vmproc[p];

	vrd = map_lookup(vmd, virt_d, NULL);
	assert(vrd);

	/* Search for the first phys region in the destination process. */
	off = virt_d - vrd->vaddr;
	physr_start_iter(vrd->phys, &iter, off, AVL_EQUAL);
	pr = physr_get_iter(&iter);
	if(!pr)
		panic("unmap_memory: no aligned phys region: %d", 0);

	/* Copy the phys block now rather than doing COW. */
	end = off + length;
	while((pr = physr_get_iter(&iter)) && off < end) {
		pb = pr->ph;
		assert(pb->refcount > 1);
		assert(pb->share_flag == PBSH_SMAP);

		if(!(pr = map_clone_ph_block(vmd, vrd, pr, &iter)))
			return ENOMEM;

		physr_incr_iter(&iter);
		off += VM_PAGE_SIZE;
	}

	return OK;
}
/*===========================================================================*
 *				rm_phys_regions				     *
 *===========================================================================*/
static void rm_phys_regions(struct vir_region *region,
	vir_bytes begin, vir_bytes length)
{
/* Remove all phys regions between @begin and @begin+length.
 *
 * Don't update the page table, because we will update it at map_memory()
 * later.
 */
	struct phys_region *pr;
	physr_iter iter;

	physr_start_iter(region->phys, &iter, begin, AVL_GREATER_EQUAL);
	while((pr = physr_get_iter(&iter)) && pr->offset < begin + length) {
		pb_unreferenced(region, pr, 1);
		physr_start_iter(region->phys, &iter, begin,
			AVL_GREATER_EQUAL);
	}
}
/*===========================================================================*
 *				map_memory				     *
 *===========================================================================*/
int map_memory(endpoint_t sour, endpoint_t dest,
	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
{
/* This is the entry point. This function will be called by handle_memory()
 * when VM receives a map-memory request.
 */
	struct vmproc *vms, *vmd;
	struct vir_region *vrs, *vrd;
	vir_bytes offset_s, offset_d;
	int p;
	int r;

	if(vm_isokendpt(sour, &p) != OK)
		panic("map_memory: bad endpoint: %d", sour);
	vms = &vmproc[p];
	if(vm_isokendpt(dest, &p) != OK)
		panic("map_memory: bad endpoint: %d", dest);
	vmd = &vmproc[p];

	vrs = map_lookup(vms, virt_s, NULL);
	assert(vrs);
	vrd = map_lookup(vmd, virt_d, NULL);
	assert(vrd);

	/* Linear address -> offset from start of vir region. */
	offset_s = virt_s - vrs->vaddr;
	offset_d = virt_d - vrd->vaddr;

	/* Make sure that the range in the source process has been mapped
	 * to physical memory.
	 */
	map_handle_memory(vms, vrs, offset_s, length, 0);

	/* Prepare work. */
	rm_phys_regions(vrd, offset_d, length);

	/* Map memory. */
	r = do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, flag);

	return r;
}
/*===========================================================================*
 *				get_clean_phys_region			     *
 *===========================================================================*/
static struct phys_region *
get_clean_phys_region(struct vmproc *vmp, vir_bytes vaddr, struct vir_region **ret_region)
{
	struct vir_region *region;
	vir_bytes mapaddr;
	struct phys_region *ph;

	mapaddr = vaddr;

	if(!(region = map_lookup(vmp, mapaddr, &ph)) || !ph) {
		printf("VM: get_clean_phys_region: 0x%lx not found\n", vaddr);
		return NULL;
	}

	assert(mapaddr >= region->vaddr);
	assert(mapaddr < region->vaddr + region->length);

	/* If it's mapped more than once, make a copy. */
	assert(ph->ph->refcount > 0);
	if(ph->ph->refcount > 1) {
		if(!(ph = map_clone_ph_block(vmp, region,
			ph, NULL))) {
			printf("VM: get_clean_phys_region: ph copy failed\n");
			return NULL;
		}
	}

	assert(ph->ph->refcount == 1);

	*ret_region = region;

	return ph;
}
static int getblock(struct vmproc *vmp, u64_t id, vir_bytes vaddr, int pages)
{
	yielded_t *yb;
	struct phys_region *ph;
	struct vir_region *region;
	yielded_avl *avl;
	block_id_t blockid;
	phys_bytes phaddr;
	int p;

	/* Try to get the yielded block */
	blockid.owner = vmp->vm_endpoint;
	blockid.id = id;
	avl = get_yielded_avl(blockid);
	if(!(yb = yielded_search(avl, blockid, AVL_EQUAL))) {
		return ESRCH;
	}

	if(yb->pages != pages) {
		printf("VM: getblock: length mismatch (%d != %d)\n",
			pages, yb->pages);
		return EFAULT;
	}

	phaddr = yb->physaddr;

	for(p = 0; p < pages; p++) {
		/* Get the intended phys region, make sure refcount is 1. */
		if(!(ph = get_clean_phys_region(vmp, vaddr, &region))) {
			printf("VM: getblock: not found for %d\n", vmp->vm_endpoint);
			return EINVAL;
		}

		assert(ph->ph->refcount == 1);

		/* Free the block that is currently there. */
		free_mem(ABS2CLICK(ph->ph->phys), 1);

		/* Set the phys block to new addr and update pagetable. */
		USE(ph->ph, ph->ph->phys = phaddr;);
		if(map_ph_writept(vmp, region, ph) != OK) {
			/* Presumably it was mapped, so there is no reason
			 * updating should fail.
			 */
			panic("do_get_block: couldn't write pt");
		}

		vaddr += VM_PAGE_SIZE;
		phaddr += VM_PAGE_SIZE;
	}

	/* Forget about the yielded block and free the struct. */
	freeyieldednode(yb, 0);

	return OK;
}
static int yieldblock(struct vmproc *vmp, u64_t id,
	vir_bytes vaddr, yielded_t **retyb, int pages)
{
	yielded_t *newyb;
	vir_bytes mem_clicks, v, p, new_phaddr;
	struct vir_region *region;
	struct phys_region *ph = NULL, *prev_ph = NULL, *first_ph = NULL;
	yielded_avl *avl;
	block_id_t blockid;

	/* Makes no sense if yielded block ID already exists, and
	 * is likely a serious bug in the caller.
	 */
	blockid.id = id;
	blockid.owner = vmp->vm_endpoint;
	avl = get_yielded_avl(blockid);
	if(yielded_search(avl, blockid, AVL_EQUAL)) {
		return EINVAL;
	}

	if((vaddr % VM_PAGE_SIZE) || pages < 1) return EFAULT;

	v = vaddr;
	for(p = 0; p < pages; p++) {
		if(!(region = map_lookup(vmp, v, &ph)) || !ph) {
			printf("VM: do_yield_block: not found for %d\n",
				vmp->vm_endpoint);
			return EINVAL;
		}
		if(!(region->flags & VR_ANON)) {
			printf("VM: yieldblock: non-anon 0x%lx\n", v);
			return EFAULT;
		}
		if(ph->ph->refcount != 1) {
			printf("VM: do_yield_block: mapped not once for %d\n",
				vmp->vm_endpoint);
			return EFAULT;
		}
		if(prev_ph) {
			if(ph->ph->phys != prev_ph->ph->phys + VM_PAGE_SIZE) {
				printf("VM: physically discontiguous yield\n");
				return EINVAL;
			}
		}
		prev_ph = ph;
		if(!first_ph) first_ph = ph;
		v += VM_PAGE_SIZE;
	}

	/* Make a new block to record the yielding in. */
	if(!SLABALLOC(newyb)) {
		return ENOMEM;
	}

	assert(!(ph->ph->phys % VM_PAGE_SIZE));

	if((mem_clicks = alloc_mem(pages, PAF_CLEAR)) == NO_MEM) {
		SLABFREE(newyb);
		return ENOMEM;
	}

	/* Update yielded block info. */
	USE(newyb,
		newyb->id = blockid;
		newyb->physaddr = first_ph->ph->phys;
		newyb->pages = pages;
		newyb->younger = NULL;);

	new_phaddr = CLICK2ABS(mem_clicks);

	/* Set new phys block to new addr and update pagetable. */
	v = vaddr;
	for(p = 0; p < pages; p++) {
		region = map_lookup(vmp, v, &ph);
		assert(region && ph);
		assert(ph->ph->refcount == 1);
		USE(ph->ph,
			ph->ph->phys = new_phaddr;);
		if(map_ph_writept(vmp, region, ph) != OK) {
			/* Presumably it was mapped, so there is no reason
			 * updating should fail.
			 */
			panic("yield_block: couldn't write pt");
		}
		v += VM_PAGE_SIZE;
		new_phaddr += VM_PAGE_SIZE;
	}

	/* Remember yielded block. */

	yielded_insert(avl, newyb);
	vmp->vm_yielded++;

	/* Add to LRU list too. It's the youngest block. */
	LRUCHECK;

	if(lru_youngest) {
		USE(lru_youngest,
			lru_youngest->younger = newyb;);
	} else {
		lru_oldest = newyb;
	}

	USE(newyb,
		newyb->older = lru_youngest;);

	lru_youngest = newyb;

	LRUCHECK;

	if(retyb)
		*retyb = newyb;

	return OK;
}
/*===========================================================================*
 *				do_forgetblocks				     *
 *===========================================================================*/
int do_forgetblocks(message *m)
{
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yield_block: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	free_yielded_proc(vmp);

	return OK;
}
2448 *===========================================================================*/
2449 int do_forgetblock(message
*m
)
2453 endpoint_t caller
= m
->m_source
;
2459 if(vm_isokendpt(caller
, &n
) != OK
)
2460 panic("do_yield_block: message from strange source: %d",
2465 id
= make64(m
->VMFB_IDLO
, m
->VMFB_IDHI
);
2468 blockid
.owner
= vmp
->vm_endpoint
;
2469 avl
= get_yielded_avl(blockid
);
2470 if((yb
= yielded_search(avl
, blockid
, AVL_EQUAL
))) {
2471 freeyieldednode(yb
, 1);
/*===========================================================================*
 *				do_yieldblockgetblock			     *
 *===========================================================================*/
int do_yieldblockgetblock(message *m)
{
	u64_t yieldid, getid;
	int n;
	endpoint_t caller = m->m_source;
	struct vmproc *vmp;
	yielded_t *yb = NULL;
	int r = ESRCH;
	int pages;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yieldblockgetblock: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	pages = m->VMYBGB_LEN / VM_PAGE_SIZE;

	if((m->VMYBGB_LEN % VM_PAGE_SIZE) || pages < 1) {
		printf("vm: non-page-aligned or short block length\n");
		return EFAULT;
	}

	yieldid = make64(m->VMYBGB_YIELDIDLO, m->VMYBGB_YIELDIDHI);
	getid = make64(m->VMYBGB_GETIDLO, m->VMYBGB_GETIDHI);

	if(cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to yield. */
		yieldblock(vmp, yieldid, (vir_bytes) m->VMYBGB_VADDR, &yb,
			pages);
	}

	if(cmp64(getid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to get. */
		r = getblock(vmp, getid, (vir_bytes) m->VMYBGB_VADDR, pages);
	}

	return r;
}
void map_setparent(struct vmproc *vmp)
{
	region_iter iter;
	struct vir_region *vr;
	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		USE(vr, vr->parent = vmp;);
		region_incr_iter(&iter);
	}
}