#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>

#include "sanitycheck.h"
PRIVATE yielded_t *lru_youngest = NULL, *lru_oldest = NULL;
/* Should a physblock be mapped writable? */
#define WRITABLE(r, pb) \
	(((r)->flags & (VR_DIRECT | VR_SHARED)) || \
	 (((r)->flags & VR_WRITABLE) && (pb)->refcount == 1))
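
/* Note: in a plain VR_WRITABLE region a block with refcount > 1 is left
 * mapped read-only, so the first write page-faults into map_pf(), which
 * then breaks the share with map_clone_ph_block() (copy-on-write).
 */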

FORWARD _PROTOTYPE(int map_new_physblock, (struct vmproc *vmp,
	struct vir_region *region, vir_bytes offset, vir_bytes length,
	phys_bytes what, u32_t allocflags, int written));

FORWARD _PROTOTYPE(int map_ph_writept, (struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr));

FORWARD _PROTOTYPE(phys_bytes freeyieldednode, (yielded_t *node, int freemem));

FORWARD _PROTOTYPE(struct vir_region *map_copy_region, (struct vmproc *vmp,
	struct vir_region *vr));

FORWARD _PROTOTYPE(struct phys_region *map_clone_ph_block, (struct vmproc *vmp,
	struct vir_region *region, struct phys_region *ph, physr_iter *iter));

FORWARD _PROTOTYPE(void lrucheck, (void));

PRIVATE char *map_name(struct vir_region *vr)
{
	static char name[100];
	char *typename, *tag;
	int type = vr->flags & (VR_ANON|VR_DIRECT);

	switch(type) {
		case VR_ANON:
			typename = "anonymous";
			break;
		case VR_DIRECT:
			typename = "direct";
			break;
		default:
			panic("unknown mapping type: %d", type);
	}

	switch(vr->tag) {
		case VRT_NONE:
			tag = "untagged";
			break;
		default:
			tag = "unknown tag value";
			break;
	}

	sprintf(name, "%s, %s", typename, tag);

	return name;
}

PUBLIC void map_printregion(struct vmproc *vmp, struct vir_region *vr)
{
	physr_iter iter;
	struct phys_region *ph;
	printf("map_printmap: map_name: %s\n", map_name(vr));
	printf("\t%s (len 0x%lx, %dkB), %s\n",
		arch_map2str(vmp, vr->vaddr), vr->length,
		vr->length/1024, map_name(vr));
	printf("\t\tphysblocks:\n");
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		printf("\t\t@ %s (refs %d): phys 0x%lx len 0x%lx\n",
			arch_map2str(vmp, vr->vaddr + ph->offset),
			ph->ph->refcount, ph->ph->phys, ph->ph->length);
		physr_incr_iter(&iter);
	}
}

/*===========================================================================*
 *				map_printmap				     *
 *===========================================================================*/
PUBLIC void map_printmap(vmp)
struct vmproc *vmp;
{
	struct vir_region *vr;

	printf("memory regions in process %d:\n", vmp->vm_endpoint);
	for(vr = vmp->vm_regions; vr; vr = vr->next) {
		map_printregion(vmp, vr);
	}
}

/*===========================================================================*
 *				map_sanitycheck_pt			     *
 *===========================================================================*/
PRIVATE int map_sanitycheck_pt(struct vmproc *vmp,
	struct vir_region *vr, struct phys_region *pr)
{
	struct phys_block *pb = pr->ph;
	int rw;
	int r;

	if(!(vmp->vm_flags & VMF_HASPT))
		return OK;

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = 0;

	r = pt_writemap(&vmp->vm_pt, vr->vaddr + pr->offset,
		pb->phys, pb->length, PTF_PRESENT | PTF_USER | rw, WMF_VERIFY);

	if(r != OK) {
		printf("proc %d phys_region 0x%lx sanity check failed\n",
			vmp->vm_endpoint, pr->offset);
		map_printregion(vmp, vr);
	}

	return r;
}

/*===========================================================================*
 *				map_sanitycheck				     *
 *===========================================================================*/
PUBLIC void map_sanitycheck(char *file, int line)
{
	struct vmproc *vmp;

/* Macro for looping over all physical blocks of all regions of
 * all processes.
 */
#define ALLREGIONS(regioncode, physcode)			  \
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) {	  \
		struct vir_region *vr;				  \
		if(!(vmp->vm_flags & VMF_INUSE))		  \
			continue;				  \
		for(vr = vmp->vm_regions; vr; vr = vr->next) {	  \
			physr_iter iter;			  \
			struct phys_region *pr;			  \
			regioncode;				  \
			physr_start_iter_least(vr->phys, &iter);  \
			while((pr = physr_get_iter(&iter))) {	  \
				physcode;			  \
				physr_incr_iter(&iter);		  \
			}					  \
		}						  \
	}

#define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
	/* Basic pointers check. */
	ALLREGIONS(MYSLABSANE(vr),MYSLABSANE(pr); MYSLABSANE(pr->ph);MYSLABSANE(pr->parent));
	ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr->parent == vr););

	/* Do counting for consistency check. */
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
		if(pr->ph->seencount == 1) {
			MYASSERT(usedpages_add(pr->ph->phys,
				pr->ph->length) == OK);
		}
	);

	/* Do consistency check. */
	ALLREGIONS(if(vr->next) {
			MYASSERT(vr->vaddr < vr->next->vaddr);
			MYASSERT(vr->vaddr + vr->length <= vr->next->vaddr);
		}
		MYASSERT(!(vr->vaddr % VM_PAGE_SIZE));,
		if(pr->ph->refcount != pr->ph->seencount) {
			printf("ph in vr 0x%lx: 0x%lx-0x%lx refcount %d "
				"but seencount %lu\n",
				vr, pr->offset,
				pr->offset + pr->ph->length,
				pr->ph->refcount, pr->ph->seencount);
		}
		{
			int n_others = 0;
			struct phys_region *others;
			if(pr->ph->refcount > 0) {
				MYASSERT(pr->ph->firstregion);
				if(pr->ph->refcount == 1) {
					MYASSERT(pr->ph->firstregion == pr);
				}
			} else {
				MYASSERT(!pr->ph->firstregion);
			}
			for(others = pr->ph->firstregion; others;
				others = others->next_ph_list) {
				MYSLABSANE(others);
				MYASSERT(others->ph == pr->ph);
				n_others++;
			}
			MYASSERT(pr->ph->refcount == n_others);
		}
		MYASSERT(pr->ph->refcount == pr->ph->seencount);
		MYASSERT(!(pr->offset % VM_PAGE_SIZE));
		MYASSERT(!(pr->ph->length % VM_PAGE_SIZE)););
	ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp, vr, pr) == OK));
}

#define LRUCHECK lrucheck()
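
/* lrucheck() walks the LRU list of yielded blocks from youngest to oldest
 * and asserts that the younger/older links are consistent in both
 * directions, and that the list ends are lru_youngest and lru_oldest.
 */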
PRIVATE void lrucheck(void)
{
	yielded_t *list;

	/* list is empty and ok if both ends point to null. */
	if(!lru_youngest && !lru_oldest)
		return;

	/* if not, both should point to something. */
	SLABSANE(lru_youngest);
	SLABSANE(lru_oldest);

	assert(!lru_youngest->younger);
	assert(!lru_oldest->older);

	for(list = lru_youngest; list; list = list->older) {
		SLABSANE(list);
		if(list->younger) {
			SLABSANE(list->younger);
			assert(list->younger->older == list);
		} else	assert(list == lru_youngest);
		if(list->older) {
			SLABSANE(list->older);
			assert(list->older->younger == list);
		} else	assert(list == lru_oldest);
	}
}

void blockstats(void)
{
	yielded_t *list;
	int blocks = 0;
	phys_bytes mem = 0;
	clock_t ticks;
	int s;

	s = getuptime(&ticks);

	for(list = lru_youngest; list; list = list->older) {
		mem += list->len;
		blocks++;
	}

	printf("%d blocks, %dkB; ", blocks, mem/1024);
}

/*=========================================================================*
 *				map_ph_writept				     *
 *=========================================================================*/
PRIVATE int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr)
{
	int rw;
	struct phys_block *pb = pr->ph;

	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(pb->length % VM_PAGE_SIZE));
	assert(!(pr->offset % VM_PAGE_SIZE));
	assert(pb->refcount > 0);

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = 0;

	if(pt_writemap(&vmp->vm_pt, vr->vaddr + pr->offset,
			pb->phys, pb->length, PTF_PRESENT | PTF_USER | rw,
			WMF_OVERWRITE) != OK) {
		printf("VM: map_writept: pt_writemap failed\n");
		return ENOMEM;
	}

	USE(pr, pr->written = 1;);

	return OK;
}
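
/* region_find_slot() below does a linear first-fit search over the gaps
 * between regions. Illustrative example: with regions at [0x1000,0x2000)
 * and [0x8000,0x9000), a request for 0x3000 bytes with minv=0 and
 * maxv=VM_DATATOP first considers the gap below 0x1000 (too small), then
 * the gap [0x2000,0x8000), which fits; *prev is set to the region after
 * which the new region should be linked.
 */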

/*===========================================================================*
 *				region_find_slot			     *
 *===========================================================================*/
PRIVATE vir_bytes region_find_slot(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length,
		struct vir_region **prev)
{
	struct vir_region *firstregion = vmp->vm_regions, *prevregion = NULL;
	vir_bytes startv;
	int foundflag = 0;

	SANITYCHECK(SCL_FUNCTIONS);

	/* We must be in paged mode to be able to do this. */
	assert(vm_paged);

	/* Length must be reasonable. */
	assert(length > 0);

	/* Special case: allow caller to set maxv to 0 meaning 'I want
	 * it to be mapped in right here.'
	 */
	if(maxv == 0) {
		maxv = minv + length;

		/* Sanity check. */
		if(maxv <= minv) {
			printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
				minv, length);
			return (vir_bytes) -1;
		}
	}

	/* Basic input sanity checks. */
	assert(!(length % VM_PAGE_SIZE));
	if(minv >= maxv) {
		printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
			minv, maxv, length);
	}
	assert(minv < maxv);
	assert(minv + length <= maxv);

#define FREEVRANGE(rangestart, rangeend, foundcode) {		\
	vir_bytes frstart = (rangestart), frend = (rangeend);	\
	frstart = MAX(frstart, minv);				\
	frend = MIN(frend, maxv);				\
	if(frend > frstart && (frend - frstart) >= length) {	\
		startv = frstart;				\
		foundflag = 1;					\
		foundcode;					\
	} }

	/* This is the free virtual address space before the first region. */
	FREEVRANGE(0, firstregion ? firstregion->vaddr : VM_DATATOP, ;);

	if(!foundflag) {
		struct vir_region *vr;
		for(vr = vmp->vm_regions; vr && !foundflag; vr = vr->next) {
			FREEVRANGE(vr->vaddr + vr->length,
				vr->next ? vr->next->vaddr : VM_DATATOP,
				prevregion = vr;);
		}
	}

	if(!foundflag) {
		printf("VM: region_find_slot: no 0x%lx bytes found for %d between 0x%lx and 0x%lx\n",
			length, vmp->vm_endpoint, minv, maxv);
		return (vir_bytes) -1;
	}

	if(prevregion) assert(prevregion->vaddr < startv);

	/* However we got it, startv must be in the requested range. */
	assert(startv >= minv);
	assert(startv < maxv);
	assert(startv + length <= maxv);

	*prev = prevregion;
	return startv;
}
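
/* map_page_region() ties the pieces together: find a slot with
 * region_find_slot(), allocate and fill a vir_region, then either map a
 * known physical range right away (what != MAP_NONE) or, for anonymous
 * regions with MF_PREALLOC, populate it through map_handle_memory().
 */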

/*===========================================================================*
 *				map_page_region				     *
 *===========================================================================*/
PUBLIC struct vir_region *map_page_region(vmp, minv, maxv, length,
	what, flags, mapflags)
struct vmproc *vmp;
vir_bytes minv;
vir_bytes maxv;
vir_bytes length;
phys_bytes what;
u32_t flags;
int mapflags;
{
	struct vir_region *prevregion = NULL, *newregion;
	vir_bytes startv;
	struct phys_region *ph;
	physr_avl *phavl;

	assert(!(length % VM_PAGE_SIZE));

	SANITYCHECK(SCL_FUNCTIONS);

	startv = region_find_slot(vmp, minv, maxv, length, &prevregion);
	if (startv == (vir_bytes) -1)
		return NULL;

	/* Now we want a new region. */
	if(!SLABALLOC(newregion)) {
		printf("VM: map_page_region: allocating region failed\n");
		return NULL;
	}

	/* Fill in node details. */
	USE(newregion,
		newregion->vaddr = startv;
		newregion->length = length;
		newregion->flags = flags;
		newregion->tag = VRT_NONE;
		newregion->parent = vmp;);

	SLABALLOC(phavl);
	if(!phavl) {
		printf("VM: map_page_region: allocating phys avl failed\n");
		SLABFREE(newregion);
		return NULL;
	}
	USE(newregion, newregion->phys = phavl;);

	physr_init(newregion->phys);

	/* If we know what we're going to map to, map it right away. */
	if(what != MAP_NONE) {
		assert(!(what % VM_PAGE_SIZE));
		assert(!(startv % VM_PAGE_SIZE));
		assert(!(mapflags & MF_PREALLOC));
		if(map_new_physblock(vmp, newregion, 0, length,
			what, PAF_CLEAR, 0) != OK) {
			printf("VM: map_new_physblock failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	if((flags & VR_ANON) && (mapflags & MF_PREALLOC)) {
		if(map_handle_memory(vmp, newregion, 0, length, 1) != OK) {
			printf("VM: map_page_region: prealloc failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	/* Link it. */
	if(prevregion) {
		assert(prevregion->vaddr < newregion->vaddr);
		USE(newregion, newregion->next = prevregion->next;);
		USE(prevregion, prevregion->next = newregion;);
	} else {
		USE(newregion, newregion->next = vmp->vm_regions;);
		vmp->vm_regions = newregion;
	}

	assert(startv == newregion->vaddr);
	if(newregion->next) {
		assert(newregion->vaddr < newregion->next->vaddr);
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return newregion;
}
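
/* pb_unreferenced() drops one phys_region reference from a phys_block:
 * it unlinks pr from the block's firstregion list, frees the physical
 * memory once the refcount reaches zero (for anonymous blocks), and
 * otherwise rewrites the page tables of the remaining references, since
 * a block can become writable again when its refcount drops back to 1.
 */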

/*===========================================================================*
 *				pb_unreferenced				     *
 *===========================================================================*/
PUBLIC void pb_unreferenced(struct vir_region *region, struct phys_region *pr)
{
	struct phys_block *pb;

	pb = pr->ph;
	assert(pb->refcount > 0);
	USE(pb, pb->refcount--;);
	assert(pb->refcount >= 0);

	if(pb->firstregion == pr) {
		USE(pb, pb->firstregion = pr->next_ph_list;);
	} else {
		struct phys_region *others;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
			if(others->next_ph_list == pr) {
				USE(others, others->next_ph_list = pr->next_ph_list;);
				break;
			}
		}

		assert(others); /* Otherwise, wasn't on the list. */
	}

	if(pb->refcount == 0) {
		assert(!pb->firstregion);
		if(region->flags & VR_ANON) {
			free_mem(ABS2CLICK(pb->phys),
				ABS2CLICK(pb->length));
		} else if(region->flags & VR_DIRECT) {
			; /* No action required. */
		} else {
			panic("strange phys flags");
		}
		SLABFREE(pb);
	} else {
		struct phys_region *others;
		int n = 0;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			if(WRITABLE(region, others->ph)) {
				if(map_ph_writept(others->parent->parent,
					others->parent, others) != OK) {
					printf("VM: map_ph_writept failed unexpectedly\n");
				}
			}
			n++;
		}
		assert(n == pb->refcount);
	}
}

PRIVATE struct phys_region *reset_physr_iter(struct vir_region *region,
	physr_iter *iter, vir_bytes offset)
{
	struct phys_region *ph;

	physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
	ph = physr_get_iter(iter);
	assert(ph);
	assert(ph->offset == offset);

	return ph;
}

/*===========================================================================*
 *				map_subfree				     *
 *===========================================================================*/
PRIVATE int map_subfree(struct vmproc *vmp,
	struct vir_region *region, vir_bytes len)
{
	struct phys_region *pr, *nextpr;
	physr_iter iter;

	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		struct phys_region *others;
		struct phys_block *pb;

		pb = pr->ph;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
		}
		physr_incr_iter(&iter);
	}

	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		physr_incr_iter(&iter);
		if(pr->offset >= len)
			break;
		if(pr->offset + pr->ph->length <= len) {
			pb_unreferenced(region, pr);
			physr_remove(region->phys, pr->offset);
			physr_start_iter_least(region->phys, &iter);
			SLABFREE(pr);
		} else {
			vir_bytes sublen;
			assert(len > pr->offset);
			assert(len < pr->offset + pr->ph->length);
			assert(pr->ph->refcount > 0);
			sublen = len - pr->offset;
			assert(!(sublen % VM_PAGE_SIZE));
			assert(sublen < pr->ph->length);
			if(pr->ph->refcount > 1) {
				if(!(pr = map_clone_ph_block(vmp, region,
					pr, &iter)))
					return ENOMEM;
			}
			assert(pr->ph->refcount == 1);
			if(!(region->flags & VR_DIRECT)) {
				free_mem(ABS2CLICK(pr->ph->phys), ABS2CLICK(sublen));
			}
			USE(pr, pr->offset += sublen;);
			USE(pr->ph,
				pr->ph->phys += sublen;
				pr->ph->length -= sublen;);
			assert(!(pr->offset % VM_PAGE_SIZE));
			assert(!(pr->ph->phys % VM_PAGE_SIZE));
			assert(!(pr->ph->length % VM_PAGE_SIZE));
		}
	}

	return OK;
}
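
/* map_free() releases an entire region: map_subfree() over the full
 * length unreferences every phys_region, after which the phys AVL tree
 * and the vir_region itself are returned to the slab allocator.
 */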

/*===========================================================================*
 *				map_free				     *
 *===========================================================================*/
PRIVATE int map_free(struct vmproc *vmp, struct vir_region *region)
{
	int r;

	if((r=map_subfree(vmp, region, region->length)) != OK) {
		printf("%d\n", __LINE__);
		return r;
	}

	USE(region,
		SLABFREE(region->phys););
	SLABFREE(region);

	return OK;
}

/*===========================================================================*
 *				free_yielded_proc			     *
 *===========================================================================*/
PRIVATE vir_bytes free_yielded_proc(struct vmproc *vmp)
{
	yielded_t *yb;
	vir_bytes total = 0;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Free associated regions. */
	while((yb = yielded_search_least(&vmp->vm_yielded_blocks))) {
		SLABSANE(yb);
		total += freeyieldednode(yb, 1);
	}

	yielded_init(&vmp->vm_yielded_blocks);

	return total;
}

PRIVATE phys_bytes freeyieldednode(yielded_t *node, int freemem)
{
	yielded_t *older, *younger, *removed;
	vir_bytes len;
	int p;

	SLABSANE(node);

	LRUCHECK;

	/* Update LRU. */

	younger = node->younger;
	older = node->older;

	if(younger) {
		SLABSANE(younger);
		assert(younger->older == node);
		USE(younger, younger->older = node->older;);
	} else {
		assert(node == lru_youngest);
		lru_youngest = node->older;
	}

	if(older) {
		SLABSANE(older);
		assert(older->younger == node);
		USE(older, older->younger = node->younger;);
	} else {
		assert(node == lru_oldest);
		lru_oldest = node->younger;
	}

	LRUCHECK;

	/* Update AVL. */

	if(vm_isokendpt(node->owner, &p) != OK)
		panic("out of date owner of yielded block %d", node->owner);
	removed = yielded_remove(&vmproc[p].vm_yielded_blocks, node->id);
	assert(removed == node);

	/* Free associated memory if requested. */
	if(freemem) {
		free_mem(ABS2CLICK(node->addr), ABS2CLICK(node->len));
	}

	/* Free node. */
	len = node->len;
	SLABFREE(node);

	return len;
}

/*========================================================================*
 *				free_yielded				  *
 *========================================================================*/
PUBLIC vir_bytes free_yielded(vir_bytes max_bytes)
{
	vir_bytes freed = 0;

	while(freed < max_bytes && lru_oldest) {
		SLABSANE(lru_oldest);
		freed += freeyieldednode(lru_oldest, 1);
	}

	return freed;
}

/*========================================================================*
 *				map_free_proc				  *
 *========================================================================*/
PUBLIC int map_free_proc(vmp)
struct vmproc *vmp;
{
	struct vir_region *r, *nextr;

	for(r = vmp->vm_regions; r; r = nextr) {
		nextr = r->next;
		SANITYCHECK(SCL_DETAIL);
		map_free(vmp, r);
		vmp->vm_regions = nextr;	/* For sanity checks. */
		SANITYCHECK(SCL_DETAIL);
	}

	vmp->vm_regions = NULL;

	/* Free associated yielded blocks. */
	free_yielded_proc(vmp);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}

/*===========================================================================*
 *				map_lookup				     *
 *===========================================================================*/
PUBLIC struct vir_region *map_lookup(vmp, offset)
struct vmproc *vmp;
vir_bytes offset;
{
	struct vir_region *r;

	SANITYCHECK(SCL_FUNCTIONS);

	if(!vmp->vm_regions)
		panic("process has no regions: %d", vmp->vm_endpoint);

	for(r = vmp->vm_regions; r; r = r->next) {
		if(offset >= r->vaddr && offset < r->vaddr + r->length)
			return r;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return NULL;
}

PRIVATE u32_t vrallocflags(u32_t flags)
{
	u32_t allocflags = 0;

	if(flags & VR_PHYS64K)
		allocflags |= PAF_ALIGN64K;
	if(flags & VR_LOWER16MB)
		allocflags |= PAF_LOWER16MB;
	if(flags & VR_LOWER1MB)
		allocflags |= PAF_LOWER1MB;
	if(flags & VR_CONTIG)
		allocflags |= PAF_CONTIG;

	return allocflags;
}

/*===========================================================================*
 *				map_new_physblock			     *
 *===========================================================================*/
PRIVATE int map_new_physblock(vmp, region, start_offset, length,
	what_mem, allocflags, written)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes start_offset;
vir_bytes length;
phys_bytes what_mem;
u32_t allocflags;
int written;
{
	struct memlist *memlist, given, *ml;
	int r;
	vir_bytes mapped = 0;
	vir_bytes offset = start_offset;

	SANITYCHECK(SCL_FUNCTIONS);

	assert(!(length % VM_PAGE_SIZE));

	if((region->flags & VR_CONTIG) &&
		(start_offset > 0 || length < region->length)) {
		printf("VM: map_new_physblock: non-full allocation requested\n");
		return EFAULT;
	}

	/* Memory for new physical block. */
	if(what_mem == MAP_NONE) {
		allocflags |= vrallocflags(region->flags);

		if(!(memlist = alloc_mem_in_list(length, allocflags))) {
			printf("map_new_physblock: couldn't allocate\n");
			return ENOMEM;
		}
	} else {
		given.phys = what_mem;
		given.length = length;
		given.next = NULL;
		memlist = &given;
		assert(given.length);
	}

	r = OK;

	for(ml = memlist; ml; ml = ml->next) {
		struct phys_region *newphysr = NULL;
		struct phys_block *newpb = NULL;

		/* Allocate things necessary for this chunk of memory. */
		if(!SLABALLOC(newphysr) || !SLABALLOC(newpb)) {
			printf("map_new_physblock: no memory for the ph slabs\n");
			if(newphysr) SLABFREE(newphysr);
			if(newpb) SLABFREE(newpb);
			r = ENOMEM;
			break;
		}

		/* New physical block. */
		assert(!(ml->phys % VM_PAGE_SIZE));

		USE(newpb,
			newpb->phys = ml->phys;
			newpb->refcount = 1;
			newpb->length = ml->length;
			newpb->firstregion = newphysr;);

		/* New physical region. */
		USE(newphysr,
			newphysr->offset = offset;
			newphysr->ph = newpb;
			newphysr->parent = region;
			/* No other references to this block. */
			newphysr->next_ph_list = NULL;);

		USE(newphysr, newphysr->written = written;);

		/* Update pagetable. */
		if(map_ph_writept(vmp, region, newphysr) != OK) {
			printf("map_new_physblock: map_ph_writept failed\n");
			r = ENOMEM;
			break;
		}

		physr_insert(region->phys, newphysr);

		offset += ml->length;
		mapped += ml->length;
	}

	if(r != OK) {
		offset = start_offset;
		/* Things did not go well. Undo everything. */
		for(ml = memlist; ml; ml = ml->next) {
			struct phys_region *physr;
			if((physr = physr_search(region->phys, offset,
				AVL_EQUAL))) {
				assert(physr->ph->refcount == 1);
				pb_unreferenced(region, physr);
				physr_remove(region->phys, physr->offset);
				SLABFREE(physr);
			}
			offset += ml->length;
		}
	} else assert(mapped == length);

	if(memlist != &given) {
		/* Always clean up the memlist itself, even if everything
		 * worked we're not using the memlist nodes any more. And
		 * the memory they reference is either freed above or in use.
		 */
		free_mem_list(memlist, 0);
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return r;
}

/*===========================================================================*
 *				map_clone_ph_block			     *
 *===========================================================================*/
PRIVATE struct phys_region *map_clone_ph_block(vmp, region, ph, iter)
struct vmproc *vmp;
struct vir_region *region;
struct phys_region *ph;
physr_iter *iter;
{
	vir_bytes offset, length;
	u32_t allocflags;
	phys_bytes physaddr;
	struct phys_region *newpr;
	int written;

	written = ph->written;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Warning: this function will free the passed
	 * phys_region *ph and replace it (in the same offset)
	 * with one or more others! So both the pointer to it
	 * and any iterators over the phys_regions in the vir_region
	 * will be invalid on successful return. (Iterators over
	 * the vir_region could be invalid on unsuccessful return too.)
	 */

	/* This is only to be done if there is more than one copy. */
	assert(ph->ph->refcount > 1);

	/* This function takes a physical block, copies its contents
	 * into newly allocated memory, and replaces the single physical
	 * block by one or more physical blocks with refcount 1 with the
	 * same contents as the original. In other words, a fragmentable
	 * version of map_copy_ph_block().
	 */

	/* Remember where and how much. */
	offset = ph->offset;
	length = ph->ph->length;
	physaddr = ph->ph->phys;

	/* Now unlink the original physical block so we can replace
	 * it with new ones.
	 */
	assert(ph->ph->refcount > 1);
	pb_unreferenced(region, ph);
	assert(ph->ph->refcount >= 1);
	physr_remove(region->phys, offset);
	SLABFREE(ph);

	SANITYCHECK(SCL_DETAIL);

	/* Put new free memory in. */
	allocflags = vrallocflags(region->flags);
	assert(!(allocflags & PAF_CONTIG));
	assert(!(allocflags & PAF_CLEAR));

	if(map_new_physblock(vmp, region, offset, length,
		MAP_NONE, allocflags, written) != OK) {
		/* XXX original range now gone. */
		printf("VM: map_clone_ph_block: map_new_physblock failed.\n");
		return NULL;
	}

	/* Copy the block to the new memory.
	 * Can only fail if map_new_physblock didn't do what we asked.
	 */
	if(copy_abs2region(physaddr, region, offset, length) != OK)
		panic("copy_abs2region failed, no good reason for that");

	newpr = physr_search(region->phys, offset, AVL_EQUAL);
	assert(newpr);
	assert(newpr->offset == offset);

	if(iter) {
		physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
		assert(physr_get_iter(iter) == newpr);
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return newpr;
}
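
/* map_pf() resolves a page fault in an anonymous region: a fault on an
 * existing block means a write to a shared (COW) page, resolved with
 * map_clone_ph_block(); a fault on a hole is resolved by mapping in a
 * fresh zeroed page with map_new_physblock().
 */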

/*===========================================================================*
 *				map_pf					     *
 *===========================================================================*/
PUBLIC int map_pf(vmp, region, offset, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset;
int write;
{
	vir_bytes virpage;
	struct phys_region *ph;
	int r = OK;

	assert(offset >= 0);
	assert(offset < region->length);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));

	virpage = offset - offset % VM_PAGE_SIZE;

	SANITYCHECK(SCL_FUNCTIONS);

	if((ph = physr_search(region->phys, offset, AVL_LESS_EQUAL)) &&
	   (ph->offset <= offset && offset < ph->offset + ph->ph->length)) {
		phys_bytes blockoffset = ph->offset;
		/* Pagefault in existing block. Do copy-on-write. */
		assert(write);
		assert(region->flags & VR_WRITABLE);
		assert(ph->ph->refcount > 0);

		if(WRITABLE(region, ph->ph)) {
			r = map_ph_writept(vmp, region, ph);
			if(r != OK)
				printf("map_ph_writept failed\n");
		} else {
			if(ph->ph->refcount > 0
				&& ph->ph->share_flag != PBSH_COW) {
				printf("VM: write RO mapped pages.\n");
				return EFAULT;
			} else {
				if(!map_clone_ph_block(vmp, region, ph, NULL))
					r = ENOMEM;
			}
		}
	} else {
		/* Pagefault in non-existing block. Map in new block. */
		if(map_new_physblock(vmp, region, virpage,
			VM_PAGE_SIZE, MAP_NONE, PAF_CLEAR, 0) != OK) {
			printf("map_new_physblock failed\n");
			r = ENOMEM;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(r != OK) {
		printf("VM: map_pf: failed (%d)\n", r);
		return r;
	}

	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+virpage,
		VM_PAGE_SIZE, write)) {
		panic("map_pf: pt_checkrange failed: %d", r);
	}

	return r;
}

/*===========================================================================*
 *				map_handle_memory			     *
 *===========================================================================*/
PUBLIC int map_handle_memory(vmp, region, offset, length, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset, length;
int write;
{
	struct phys_region *physr, *nextphysr;
	int changes = 0;
	physr_iter iter;

#define FREE_RANGE_HERE(er1, er2) {					\
	struct phys_region *r1 = (er1), *r2 = (er2);			\
	vir_bytes start = offset, end = offset + length;		\
	if(r1) {							\
		start = MAX(start, r1->offset + r1->ph->length); }	\
	if(r2) {							\
		end = MIN(end, r2->offset); }				\
	if(start < end) {						\
		SANITYCHECK(SCL_DETAIL);				\
		if(map_new_physblock(vmp, region, start,		\
			end-start, MAP_NONE, PAF_CLEAR, 0) != OK) {	\
			SANITYCHECK(SCL_DETAIL);			\
			return ENOMEM;					\
		}							\
		changes++;						\
	} }

	SANITYCHECK(SCL_FUNCTIONS);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));
	assert(!(offset % VM_PAGE_SIZE));
	assert(!(length % VM_PAGE_SIZE));
	assert(!write || (region->flags & VR_WRITABLE));

	physr_start_iter(region->phys, &iter, offset, AVL_LESS_EQUAL);
	physr = physr_get_iter(&iter);

	if(!physr) {
		physr_start_iter(region->phys, &iter, offset, AVL_GREATER_EQUAL);
		physr = physr_get_iter(&iter);
	}

	FREE_RANGE_HERE(NULL, physr);

	if(physr) {
		physr = reset_physr_iter(region, &iter, physr->offset);
		if(physr->offset + physr->ph->length <= offset) {
			physr_incr_iter(&iter);
			physr = physr_get_iter(&iter);

			FREE_RANGE_HERE(NULL, physr);
			if(physr) {
				physr = reset_physr_iter(region, &iter,
					physr->offset);
			}
		}
	}

	while(physr) {
		int r;

		SANITYCHECK(SCL_DETAIL);

		if(write) {
			assert(physr->ph->refcount > 0);
			if(!WRITABLE(region, physr->ph)) {
				if(!(physr = map_clone_ph_block(vmp, region,
					physr, &iter))) {
					printf("VM: map_handle_memory: no copy\n");
					return ENOMEM;
				}
				changes++;
			} else {
				SANITYCHECK(SCL_DETAIL);
				if((r=map_ph_writept(vmp, region, physr)) != OK) {
					printf("VM: map_ph_writept failed\n");
					return r;
				}
				changes++;
				SANITYCHECK(SCL_DETAIL);
			}
		}

		SANITYCHECK(SCL_DETAIL);
		physr_incr_iter(&iter);
		nextphysr = physr_get_iter(&iter);
		FREE_RANGE_HERE(physr, nextphysr);
		SANITYCHECK(SCL_DETAIL);
		if(nextphysr) {
			if(nextphysr->offset >= offset + length)
				break;
			nextphysr = reset_physr_iter(region, &iter,
				nextphysr->offset);
		}
		physr = nextphysr;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(changes < 1) {
		printf("region start at 0x%lx offset 0x%lx len 0x%lx write %d\n",
			region->vaddr, offset, length, write);
		printf("no changes in map_handle_memory\n");
		return EFAULT;
	}

	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset, length, write)) {
		printf("handle mem %s-", arch_map2str(vmp, region->vaddr+offset));
		printf("%s failed\n", arch_map2str(vmp, region->vaddr+offset+length));
		map_printregion(vmp, region);
		panic("checkrange failed");
	}

	return OK;
}

static int countregions(struct vir_region *vr)
{
	int n = 0;
	struct phys_region *ph;
	physr_iter iter;
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		n++;
		physr_incr_iter(&iter);
	}
	return n;
}

/*===========================================================================*
 *				map_copy_region				     *
 *===========================================================================*/
PRIVATE struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region *vr)
{
	/* map_copy_region creates a complete copy of the vir_region
	 * data structure, linking in the same phys_blocks directly,
	 * but all in limbo, i.e., the caller has to link the vir_region
	 * to a process. Therefore it doesn't increase the refcount in
	 * the phys_block; the caller has to do this once it's linked.
	 * The reason for this is to keep the sanity checks working
	 * within this function.
	 */
	struct vir_region *newvr;
	struct phys_region *ph;
	physr_iter iter;
	physr_avl *phavl;
	int cr;

	cr = countregions(vr);

	if(!SLABALLOC(newvr))
		return NULL;

	SLABALLOC(phavl);
	if(!phavl) {
		SLABFREE(newvr);
		return NULL;
	}

	USE(newvr,
		*newvr = *vr;
		newvr->next = NULL;
		newvr->phys = phavl;);

	physr_init(newvr->phys);

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		struct phys_region *newph;
		if(!SLABALLOC(newph)) {
			map_free(vmp, newvr);
			return NULL;
		}
		USE(newph,
			newph->ph = ph->ph;
			newph->next_ph_list = NULL;
			newph->parent = newvr;
			newph->offset = ph->offset;);
		USE(newph, newph->written = 0;);
		physr_insert(newvr->phys, newph);
		assert(countregions(vr) == cr);
		physr_incr_iter(&iter);
	}

	assert(countregions(vr) == countregions(newvr));

	return newvr;
}

/*===========================================================================*
 *				copy_abs2region				     *
 *===========================================================================*/
PUBLIC int copy_abs2region(phys_bytes abs, struct vir_region *destregion,
	phys_bytes offset, phys_bytes len)
{
	assert(destregion);
	assert(destregion->phys);
	while(len > 0) {
		phys_bytes sublen, suboffset;
		struct phys_region *ph;
		assert(destregion);
		assert(destregion->phys);
		if(!(ph = physr_search(destregion->phys, offset, AVL_LESS_EQUAL))) {
			printf("VM: copy_abs2region: no phys region found (1).\n");
			return EFAULT;
		}
		assert(ph->offset <= offset);
		if(ph->offset+ph->ph->length <= offset) {
			printf("VM: copy_abs2region: no phys region found (2).\n");
			return EFAULT;
		}
		suboffset = offset - ph->offset;
		assert(suboffset < ph->ph->length);
		sublen = len;
		if(sublen > ph->ph->length - suboffset)
			sublen = ph->ph->length - suboffset;
		assert(suboffset + sublen <= ph->ph->length);
		if(ph->ph->refcount != 1) {
			printf("VM: copy_abs2region: refcount not 1.\n");
			return EFAULT;
		}

		if(sys_abscopy(abs, ph->ph->phys + suboffset, sublen) != OK) {
			printf("VM: copy_abs2region: abscopy failed.\n");
			return EFAULT;
		}
		abs += sublen;
		offset += sublen;
		len -= sublen;
	}

	return OK;
}

/*=========================================================================*
 *				map_writept				     *
 *=========================================================================*/
PUBLIC int map_writept(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *ph;
	physr_iter iter;
	int r;

	for(vr = vmp->vm_regions; vr; vr = vr->next) {
		physr_start_iter_least(vr->phys, &iter);
		while((ph = physr_get_iter(&iter))) {
			physr_incr_iter(&iter);

			/* If this phys block is shared as SMAP, then do
			 * not update the page table. */
			if(ph->ph->refcount > 1
				&& ph->ph->share_flag == PBSH_SMAP) {
				continue;
			}

			if((r=map_ph_writept(vmp, vr, ph)) != OK) {
				printf("VM: map_writept: failed\n");
				return r;
			}
		}
	}

	return OK;
}

/*========================================================================*
 *				map_proc_copy				  *
 *========================================================================*/
PUBLIC int map_proc_copy(dst, src)
struct vmproc *dst;
struct vmproc *src;
{
	struct vir_region *vr, *prevvr = NULL;
	dst->vm_regions = NULL;

	SANITYCHECK(SCL_FUNCTIONS);

	for(vr = src->vm_regions; vr; vr = vr->next) {
		physr_iter iter_orig, iter_new;
		struct vir_region *newvr;
		struct phys_region *orig_ph, *new_ph;
		if(!(newvr = map_copy_region(dst, vr))) {
			map_free_proc(dst);
			return ENOMEM;
		}
		USE(newvr, newvr->parent = dst;);
		if(prevvr) { USE(prevvr, prevvr->next = newvr;); }
		else { dst->vm_regions = newvr; }
		physr_start_iter_least(vr->phys, &iter_orig);
		physr_start_iter_least(newvr->phys, &iter_new);
		while((orig_ph = physr_get_iter(&iter_orig))) {
			struct phys_block *pb;
			new_ph = physr_get_iter(&iter_new);
			/* Check two physregions both are nonnull,
			 * are different, and match physblocks.
			 */
			assert(new_ph);
			assert(orig_ph);
			assert(orig_ph != new_ph);
			pb = orig_ph->ph;
			assert(pb == new_ph->ph);

			/* Link in new physregion. */
			assert(!new_ph->next_ph_list);
			USE(new_ph, new_ph->next_ph_list = pb->firstregion;);
			USE(pb, pb->firstregion = new_ph;);

			/* Increase phys block refcount */
			assert(pb->refcount > 0);
			USE(pb, pb->refcount++;);
			assert(pb->refcount > 1);

			/* If the phys block has been shared as SMAP,
			 * do the regular copy. */
			if(pb->refcount > 2 && pb->share_flag == PBSH_SMAP) {
				map_clone_ph_block(dst, newvr,new_ph,
					&iter_new);
			} else {
				USE(pb, pb->share_flag = PBSH_COW;);
			}

			/* Get next new physregion */
			physr_incr_iter(&iter_orig);
			physr_incr_iter(&iter_new);
		}
		assert(!physr_get_iter(&iter_new));
		prevvr = newvr;
	}

	map_writept(src);
	map_writept(dst);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}

/*========================================================================*
 *				map_proc_kernel				  *
 *========================================================================*/
PUBLIC struct vir_region *map_proc_kernel(struct vmproc *vmp)
{
	struct vir_region *vr;

	/* We assume these are the first regions to be mapped to
	 * make the function a bit simpler (free all regions on error).
	 */
	assert(!vmp->vm_regions);
	assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
	assert(!(KERNEL_TEXT % VM_PAGE_SIZE));
	assert(!(KERNEL_TEXT_LEN % VM_PAGE_SIZE));
	assert(!(KERNEL_DATA % VM_PAGE_SIZE));
	assert(!(KERNEL_DATA_LEN % VM_PAGE_SIZE));

	if(!(vr = map_page_region(vmp, KERNEL_TEXT, 0, KERNEL_TEXT_LEN,
		KERNEL_TEXT, VR_DIRECT | VR_WRITABLE | VR_NOPF, 0)) ||
	   !(vr = map_page_region(vmp, KERNEL_DATA, 0, KERNEL_DATA_LEN,
		KERNEL_DATA, VR_DIRECT | VR_WRITABLE | VR_NOPF, 0))) {
		map_free_proc(vmp);
		return NULL;
	}

	return vr;	/* Return pointer not useful, just non-NULL. */
}

/*========================================================================*
 *				map_region_extend			  *
 *========================================================================*/
PUBLIC int map_region_extend(struct vmproc *vmp, struct vir_region *vr,
	vir_bytes delta)
{
	vir_bytes end;

	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));

	if(!delta) return OK;
	end = vr->vaddr + vr->length;
	assert(end >= vr->vaddr);

	if(end + delta <= end) {
		printf("VM: strange delta 0x%lx\n", delta);
		return ENOMEM;
	}

	if(!vr->next || end + delta <= vr->next->vaddr) {
		USE(vr, vr->length += delta;);
		return OK;
	}

	return ENOMEM;
}

/*========================================================================*
 *				map_region_shrink			  *
 *========================================================================*/
PUBLIC int map_region_shrink(struct vir_region *vr, vir_bytes delta)
{
	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));

	printf("VM: ignoring region shrink\n");

	return OK;
}

PUBLIC struct vir_region *map_region_lookup_tag(vmp, tag)
struct vmproc *vmp;
u32_t tag;
{
	struct vir_region *vr;

	for(vr = vmp->vm_regions; vr; vr = vr->next)
		if(vr->tag == tag)
			return vr;

	return NULL;
}

PUBLIC void map_region_set_tag(struct vir_region *vr, u32_t tag)
{
	USE(vr, vr->tag = tag;);
}

PUBLIC u32_t map_region_get_tag(struct vir_region *vr)
{
	return vr->tag;
}

/*========================================================================*
 *				map_unmap_region			  *
 *========================================================================*/
PUBLIC int map_unmap_region(struct vmproc *vmp, struct vir_region *region,
	vir_bytes len)
{
	/* Shrink the region by 'len' bytes, from the start. Unreference
	 * memory it used to reference if any.
	 */
	struct vir_region *r, *nextr, *prev = NULL;
	vir_bytes regionstart;

	SANITYCHECK(SCL_FUNCTIONS);

	for(r = vmp->vm_regions; r; r = r->next) {
		if(r == region)
			break;
		prev = r;
	}

	SANITYCHECK(SCL_DETAIL);

	if(r == NULL)
		panic("map_unmap_region: region not found");

	if(len > r->length || (len % VM_PAGE_SIZE)) {
		printf("VM: bogus length 0x%lx\n", len);
		return EINVAL;
	}

	if(!(r->flags & (VR_ANON|VR_DIRECT))) {
		printf("VM: only unmap anonymous or direct memory\n");
		return EINVAL;
	}

	regionstart = r->vaddr;

	if(len == r->length) {
		/* Whole region disappears. Unlink and free it. */
		if(!prev) {
			vmp->vm_regions = r->next;
		} else {
			USE(prev, prev->next = r->next;);
		}
		map_free(vmp, r);
	} else {
		struct phys_region *pr;
		physr_iter iter;
		/* Region shrinks. First unreference its memory
		 * and then shrink the region.
		 */
		map_subfree(vmp, r, len);
		USE(r,
			r->vaddr += len;
			r->length -= len;);
		physr_start_iter_least(r->phys, &iter);

		/* vaddr has increased; to make all the phys_regions
		 * point to the same addresses, make them shrink by the
		 * same amount.
		 */
		while((pr = physr_get_iter(&iter))) {
			assert(pr->offset >= len);
			USE(pr, pr->offset -= len;);
			physr_incr_iter(&iter);
		}
	}

	SANITYCHECK(SCL_DETAIL);

	if(pt_writemap(&vmp->vm_pt, regionstart,
	  MAP_NONE, len, 0, WMF_OVERWRITE) != OK) {
		printf("VM: map_unmap_region: pt_writemap failed\n");
		return ENOMEM;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}

/*========================================================================*
 *				map_remap				  *
 *========================================================================*/
PUBLIC int map_remap(struct vmproc *dvmp, vir_bytes da, size_t size,
	struct vir_region *region, vir_bytes *r)
{
	struct vir_region *vr, *prev;
	struct phys_region *ph;
	vir_bytes startv, dst_addr;
	physr_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	assert(region->flags & VR_SHARED);

	/* da is handled differently */
	if (!da)
		dst_addr = dvmp->vm_stacktop;
	else
		dst_addr = da;
	dst_addr = arch_vir2map(dvmp, dst_addr);

	prev = NULL;
	/* round up to page size */
	assert(!(size % VM_PAGE_SIZE));
	startv = region_find_slot(dvmp, dst_addr, VM_DATATOP, size, &prev);
	if (startv == (vir_bytes) -1) {
		printf("map_remap: search 0x%x...\n", dst_addr);
		return ENOMEM;
	}
	/* when the user specifies the address, we cannot change it */
	if (da && (startv != dst_addr))
		return EINVAL;

	vr = map_copy_region(dvmp, region);
	if(!vr)
		return ENOMEM;

	USE(vr,
		vr->vaddr = startv;
		vr->length = size;
		vr->flags = region->flags;
		vr->tag = VRT_NONE;
		vr->parent = dvmp;);
	assert(vr->flags & VR_SHARED);

	if (prev) {
		USE(vr,
			vr->next = prev->next;);
		USE(prev, prev->next = vr;);
	} else {
		USE(vr,
			vr->next = dvmp->vm_regions;);
		dvmp->vm_regions = vr;
	}

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		struct phys_block *pb = ph->ph;
		assert(!ph->next_ph_list);
		USE(ph, ph->next_ph_list = pb->firstregion;);
		USE(pb, pb->firstregion = ph;);
		USE(pb, pb->refcount++;);
		if(map_ph_writept(dvmp, vr, ph) != OK) {
			panic("map_remap: map_ph_writept failed");
		}
		physr_incr_iter(&iter);
	}

	*r = startv;

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}

/*========================================================================*
 *				map_get_phys				  *
 *========================================================================*/
PUBLIC int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
{
	struct vir_region *vr;
	struct phys_region *ph;
	physr_iter iter;

	if (!(vr = map_lookup(vmp, addr)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!(vr->flags & VR_SHARED))
		return EINVAL;

	physr_start_iter_least(vr->phys, &iter);
	ph = physr_get_iter(&iter);

	assert(ph);
	assert(ph->ph);
	if (r)
		*r = ph->ph->phys;

	return OK;
}

/*========================================================================*
 *				map_get_ref				  *
 *========================================================================*/
PUBLIC int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
{
	struct vir_region *vr;
	struct phys_region *ph;
	physr_iter iter;

	if (!(vr = map_lookup(vmp, addr)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!(vr->flags & VR_SHARED))
		return EINVAL;

	physr_start_iter_least(vr->phys, &iter);
	ph = physr_get_iter(&iter);

	assert(ph);
	assert(ph->ph);
	if (cnt)
		*cnt = ph->ph->refcount;

	return OK;
}

/*========================================================================*
 *				get_usage_info				  *
 *========================================================================*/
PUBLIC void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
{
	struct vir_region *vr;
	struct phys_region *ph;
	physr_iter iter;
	vir_bytes len;

	memset(vui, 0, sizeof(*vui));

	for(vr = vmp->vm_regions; vr; vr = vr->next) {
		physr_start_iter_least(vr->phys, &iter);
		while((ph = physr_get_iter(&iter))) {
			len = ph->ph->length;

			/* All present pages are counted towards the total. */
			vui->vui_total += len;

			if (ph->ph->refcount > 1) {
				/* Any page with a refcount > 1 is common. */
				vui->vui_common += len;

				/* Any common, non-COW page is shared. */
				if (vr->flags & VR_SHARED ||
					ph->ph->share_flag == PBSH_SMAP)
					vui->vui_shared += len;
			}
			physr_incr_iter(&iter);
		}
	}
}

/*===========================================================================*
 *				get_region_info				     *
 *===========================================================================*/
PUBLIC int get_region_info(struct vmproc *vmp, struct vm_region_info *vri,
	int max, vir_bytes *nextp)
{
	struct vir_region *vr;
	vir_bytes next;
	int count;

	next = *nextp;

	if (!max) return 0;

	for(vr = vmp->vm_regions; vr; vr = vr->next)
		if (vr->vaddr >= next) break;

	if (!vr) return 0;

	for(count = 0; vr && count < max; vr = vr->next, count++, vri++) {
		vri->vri_addr = arch_map2info(vmp, vr->vaddr, &vri->vri_seg,
			&vri->vri_prot);
		vri->vri_length = vr->length;

		/* "AND" the provided protection with per-page protection. */
		if (!(vr->flags & VR_WRITABLE))
			vri->vri_prot &= ~PROT_WRITE;

		vri->vri_flags = (vr->flags & VR_SHARED) ? MAP_SHARED : 0;

		next = vr->vaddr + vr->length;
	}

	*nextp = next;
	return count;
}

/*========================================================================*
 *				printregionstats			  *
 *========================================================================*/
PUBLIC void printregionstats(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *pr;
	physr_iter iter;
	vir_bytes used = 0, weighted = 0;

	for(vr = vmp->vm_regions; vr; vr = vr->next) {
		if(vr->flags & VR_DIRECT)
			continue;
		physr_start_iter_least(vr->phys, &iter);
		while((pr = physr_get_iter(&iter))) {
			physr_incr_iter(&iter);
			used += pr->ph->length;
			weighted += pr->ph->length / pr->ph->refcount;
		}
	}

	printf("%6dkB  %6dkB\n", used/1024, weighted/1024);
}

/*===========================================================================*
 *				do_map_memory				     *
 *===========================================================================*/
PRIVATE int do_map_memory(struct vmproc *vms, struct vmproc *vmd,
	struct vir_region *vrs, struct vir_region *vrd,
	vir_bytes offset_s, vir_bytes offset_d,
	vir_bytes length, int flag)
{
	struct phys_region *prs;
	struct phys_region *newphysr;
	struct phys_block *pb;
	physr_iter iter;
	u32_t pt_flag = PTF_PRESENT | PTF_USER;
	vir_bytes end;

	/* Search for the first phys region in the source process. */
	physr_start_iter(vrs->phys, &iter, offset_s, AVL_EQUAL);
	prs = physr_get_iter(&iter);
	if(!prs)
		panic("do_map_memory: no aligned phys region: %d", 0);

	/* flag: 0 -> read-only
	 *       1 -> writable
	 *      -1 -> share as COW, so read-only
	 */
	if(flag > 0)
		pt_flag |= PTF_WRITE;

	/* Map phys blocks in the source process to the destination process. */
	end = offset_d + length;
	while((prs = physr_get_iter(&iter)) && offset_d < end) {
		pb = prs->ph;

		/* If a SMAP share was requested but the phys block has already
		 * been shared as COW, copy the block for the source phys region
		 * first.
		 */
		if(flag >= 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_COW) {
			if(!(prs = map_clone_ph_block(vms, vrs, prs, &iter)))
				return ENOMEM;
			pb = prs->ph;
		}

		/* Allocate a new phys region. */
		if(!SLABALLOC(newphysr))
			return ENOMEM;

		/* Set and link the new phys region to the block. */
		newphysr->ph = pb;
		newphysr->offset = offset_d;
		newphysr->parent = vrd;
		newphysr->next_ph_list = pb->firstregion;
		pb->firstregion = newphysr;
		physr_insert(newphysr->parent->phys, newphysr);
		pb->refcount++;

		/* If a COW share was requested but the phys block has already
		 * been shared as SMAP, give up on COW and copy the block for
		 * the destination phys region now.
		 */
		if(flag < 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_SMAP) {
			if(!(newphysr = map_clone_ph_block(vmd, vrd,
				newphysr, NULL)))
				return ENOMEM;
		}
		else {
			/* See if this is a COW share or SMAP share. */
			if(flag < 0) {			/* COW share */
				pb->share_flag = PBSH_COW;
				/* Update the page table for the src process. */
				pt_writemap(&vms->vm_pt, offset_s + vrs->vaddr,
					pb->phys, pb->length,
					pt_flag, WMF_OVERWRITE);
			}
			else {			/* SMAP share */
				pb->share_flag = PBSH_SMAP;
			}
			/* Update the page table for the destination process. */
			pt_writemap(&vmd->vm_pt, offset_d + vrd->vaddr,
				pb->phys, pb->length, pt_flag, WMF_OVERWRITE);
		}

		physr_incr_iter(&iter);
		offset_d += pb->length;
		offset_s += pb->length;
	}
	return OK;
}

/*===========================================================================*
 *				unmap_memory				     *
 *===========================================================================*/
PUBLIC int unmap_memory(endpoint_t sour, endpoint_t dest,
	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
{
	struct vmproc *vmd;
	struct vir_region *vrd;
	struct phys_region *pr;
	struct phys_block *pb;
	physr_iter iter;
	vir_bytes off, end;
	int p;

	/* Use information on the destination process to unmap. */
	if(vm_isokendpt(dest, &p) != OK)
		panic("unmap_memory: bad endpoint: %d", dest);
	vmd = &vmproc[p];

	vrd = map_lookup(vmd, virt_d);
	assert(vrd);

	/* Search for the first phys region in the destination process. */
	off = virt_d - vrd->vaddr;
	physr_start_iter(vrd->phys, &iter, off, AVL_EQUAL);
	pr = physr_get_iter(&iter);
	if(!pr)
		panic("unmap_memory: no aligned phys region: %d", 0);

	/* Copy the phys block now rather than doing COW. */
	end = off + length;
	while((pr = physr_get_iter(&iter)) && off < end) {
		pb = pr->ph;
		assert(pb->refcount > 1);
		assert(pb->share_flag == PBSH_SMAP);

		if(!(pr = map_clone_ph_block(vmd, vrd, pr, &iter)))
			return ENOMEM;
		physr_incr_iter(&iter);
		off += pb->length;
	}

	return OK;
}
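
/* split_phys() is a helper for clean_phys_regions() below: it splits one
 * phys block (and every phys_region that references it) in two at @point,
 * so that a mapping request boundary never falls in the middle of a block.
 */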

/*===========================================================================*
 *				split_phys				     *
 *===========================================================================*/
PRIVATE int split_phys(struct phys_region *pr, vir_bytes point)
{
	struct phys_region *newpr, *q, *prev;
	struct phys_block *newpb;
	struct phys_block *pb = pr->ph;

	/* Split the phys region into 2 parts by @point. */
	if(pr->offset >= point || pr->offset + pb->length <= point)
		return OK;

	if(!SLABALLOC(newpb))
		return ENOMEM;

	/* Split phys block. */
	*newpb = *pb;
	pb->length = point - pr->offset;
	newpb->length -= pb->length;
	newpb->phys += pb->length;

	/* Split phys regions in a list. */
	for(q = pb->firstregion; q; q = q->next_ph_list) {
		if(!SLABALLOC(newpr))
			return ENOMEM;

		*newpr = *q;
		newpr->ph = newpb;
		newpr->offset += pb->length;

		/* Link to the vir region's phys region list. */
		physr_insert(newpr->parent->phys, newpr);

		/* Link to the next_ph_list. */
		if(q == pb->firstregion) {
			newpb->firstregion = newpr;
			prev = newpr;
		} else {
			prev->next_ph_list = newpr;
			prev = newpr;
		}
	}
	prev->next_ph_list = NULL;

	return OK;
}

/*===========================================================================*
 *				clean_phys_regions			     *
 *===========================================================================*/
PRIVATE void clean_phys_regions(struct vir_region *region,
	vir_bytes offset, vir_bytes length)
{
	/* Consider @offset as the start address and @offset+length as the end address.
	 * If there are phys regions crossing the start address or the end address,
	 * split them into 2 parts.
	 *
	 * We assume that the phys regions are listed in order and don't overlap.
	 */
	struct phys_region *pr;
	physr_iter iter;

	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		/* If this phys region crosses the start address, split it. */
		if(pr->offset < offset
			&& pr->offset + pr->ph->length > offset) {
			split_phys(pr, offset);
			physr_start_iter_least(region->phys, &iter);
		}
		/* If this phys region crosses the end address, split it. */
		else if(pr->offset < offset + length
			&& pr->offset + pr->ph->length > offset + length) {
			split_phys(pr, offset + length);
			physr_start_iter_least(region->phys, &iter);
		}
		else
			physr_incr_iter(&iter);
	}
}

/*===========================================================================*
 *				rm_phys_regions				     *
 *===========================================================================*/
PRIVATE void rm_phys_regions(struct vir_region *region,
	vir_bytes begin, vir_bytes length)
{
	/* Remove all phys regions between @begin and @begin+length.
	 *
	 * Don't update the page table, because we will update it at map_memory()
	 * later.
	 */
	struct phys_region *pr;
	physr_iter iter;

	physr_start_iter(region->phys, &iter, begin, AVL_GREATER_EQUAL);
	while((pr = physr_get_iter(&iter)) && pr->offset < begin + length) {
		pb_unreferenced(region, pr);
		physr_remove(region->phys, pr->offset);
		physr_start_iter(region->phys, &iter, begin,
			AVL_GREATER_EQUAL);
		SLABFREE(pr);
	}
}

/*===========================================================================*
 *				map_memory				     *
 *===========================================================================*/
PUBLIC int map_memory(endpoint_t sour, endpoint_t dest,
	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
{
	/* This is the entry point. This function will be called by handle_memory()
	 * when VM receives a map-memory request.
	 */
	struct vmproc *vms, *vmd;
	struct vir_region *vrs, *vrd;
	vir_bytes offset_s, offset_d;
	int p;
	int r;

	if(vm_isokendpt(sour, &p) != OK)
		panic("map_memory: bad endpoint: %d", sour);
	vms = &vmproc[p];
	if(vm_isokendpt(dest, &p) != OK)
		panic("map_memory: bad endpoint: %d", dest);
	vmd = &vmproc[p];

	vrs = map_lookup(vms, virt_s);
	assert(vrs);
	vrd = map_lookup(vmd, virt_d);
	assert(vrd);

	/* Linear address -> offset from start of vir region. */
	offset_s = virt_s - vrs->vaddr;
	offset_d = virt_d - vrd->vaddr;

	/* Make sure that the range in the source process has been mapped
	 * to physical memory.
	 */
	map_handle_memory(vms, vrs, offset_s, length, 0);

	clean_phys_regions(vrs, offset_s, length);
	clean_phys_regions(vrd, offset_d, length);
	rm_phys_regions(vrd, offset_d, length);

	r = do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, flag);

	return r;
}

/*========================================================================*
 *				map_lookup_phys				  *
 *========================================================================*/
PUBLIC phys_bytes
map_lookup_phys(struct vmproc *vmp, u32_t tag)
{
	struct vir_region *vr;
	struct phys_region *pr;
	physr_iter iter;

	if(!(vr = map_region_lookup_tag(vmp, tag))) {
		printf("VM: request for phys of missing region\n");
		return MAP_NONE;
	}

	physr_start_iter_least(vr->phys, &iter);

	if(!(pr = physr_get_iter(&iter))) {
		printf("VM: request for phys of unmapped region\n");
		return MAP_NONE;
	}

	if(pr->offset != 0 || pr->ph->length != vr->length) {
		printf("VM: request for phys of partially mapped region\n");
		return MAP_NONE;
	}

	return pr->ph->phys;
}

/*===========================================================================*
 *				get_clean_phys_region			     *
 *===========================================================================*/
PRIVATE struct phys_region *
get_clean_phys_region(struct vmproc *vmp, vir_bytes vaddr, vir_bytes length,
	struct vir_region **ret_region)
{
	struct vir_region *region;
	vir_bytes regionoffset, mapaddr;
	struct phys_region *ph;

	mapaddr = arch_vir2map(vmp, vaddr);

	if(!(region = map_lookup(vmp, mapaddr))) {
		printf("VM: get_clean_phys_region: 0x%lx not found\n", vaddr);
		return NULL;
	}

	if(!(region->flags & VR_ANON)) {
		printf("VM: get_clean_phys_region: non-anon 0x%lx\n", vaddr);
		return NULL;
	}

	assert(mapaddr >= region->vaddr);
	assert(mapaddr < region->vaddr + region->length);

	regionoffset = mapaddr-region->vaddr;

	/* For now, only support the yielding of blocks that are
	 * exactly a mapped phys_region. Go get that phys_region.
	 * (This can be improved without changing the interface.)
	 */
	if(!(ph = physr_search(region->phys, regionoffset,
		AVL_EQUAL))) {
		printf("VM: get_clean_phys_region: exact block not found\n");
		return NULL;
	}

	/* Make sure this is what we asked for. */
	assert(ph->offset == regionoffset);

	if(ph->ph->length != length) {
		printf("VM: get_clean_phys_region: len mismatch (%d, %d)\n",
			ph->ph->length, length);
		return NULL;
	}

	/* If it's mapped more than once, make a copy. */
	assert(ph->ph->refcount > 0);
	if(ph->ph->refcount > 1) {
		if(!(ph = map_clone_ph_block(vmp, region,
			ph, NULL))) {
			printf("VM: get_clean_phys_region: ph copy failed\n");
			return NULL;
		}
	}

	assert(ph->ph->refcount == 1);

	*ret_region = region;

	return ph;
}

PRIVATE int getblock(struct vmproc *vmp, u64_t id,
	vir_bytes vaddr, vir_bytes len)
{
	yielded_t *yb;
	struct phys_region *ph;
	struct vir_region *region;

	/* Try to get the yielded block */
	if(!(yb = yielded_search(&vmp->vm_yielded_blocks, id, AVL_EQUAL))) {
		return ESRCH;
	}

	/* Check the size as a sanity check. */
	if(yb->len != len) {
		printf("VM: id 0x%lx%08lx mismatched size (%d, %d) for %d\n",
			ex64hi(id), ex64lo(id), yb->len, len, vmp->vm_endpoint);
		return EFAULT;
	}

	/* Get the intended phys region, make sure refcount is 1. */
	if(!(ph = get_clean_phys_region(vmp, vaddr, len, &region))) {
		printf("VM: getblock: not found for %d\n", vmp->vm_endpoint);
		return EINVAL;
	}

	assert(ph->ph->refcount == 1);

	/* Free the block that is currently there. */
	free_mem(ABS2CLICK(ph->ph->phys), ABS2CLICK(ph->ph->length));

	/* Set the phys block to new addr and update pagetable. */
	USE(ph->ph, ph->ph->phys = yb->addr;);
	if(map_ph_writept(vmp, region, ph) != OK) {
		/* Presumably it was mapped, so there is no reason
		 * updating should fail.
		 */
		panic("do_get_block: couldn't write pt");
	}

	/* Forget about the yielded block and free the struct. */
	freeyieldednode(yb, 0);

	return OK;
}
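
/* yieldblock() is the counterpart of getblock(): the caller donates the
 * physical memory behind (vaddr, len) to VM, receives fresh zeroed memory
 * in its place, and VM remembers the old block under the given id on the
 * per-process yielded AVL tree and the global LRU, where it may be
 * reclaimed by free_yielded() under memory pressure.
 */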
PRIVATE int yieldblock(struct vmproc *vmp, u64_t id,
	vir_bytes vaddr, vir_bytes len, yielded_t **retyb)
{
	yielded_t *newyb;
	vir_bytes mem_clicks, newmem, clicks;
	struct vir_region *region;
	struct phys_region *ph;

	/* Makes no sense if yielded block ID already exists, and
	 * is likely a serious bug in the caller.
	 */
	if(yielded_search(&vmp->vm_yielded_blocks, id, AVL_EQUAL)) {
		return EINVAL;
	}

	if(!(ph = get_clean_phys_region(vmp, vaddr, len, &region))) {
		printf("VM: do_yield_block: not found for %d\n",
			vmp->vm_endpoint);
		return EINVAL;
	}

	/* Make a new block to record the yielding in. */
	if(!SLABALLOC(newyb)) {
		return ENOMEM;
	}

	assert(!(ph->ph->phys % VM_PAGE_SIZE));
	assert(!(ph->ph->length % VM_PAGE_SIZE));

	clicks = CLICKSPERPAGE * ph->ph->length / VM_PAGE_SIZE;
	if((mem_clicks = alloc_mem(clicks, PAF_CLEAR)) == NO_MEM) {
		SLABFREE(newyb);
		return ENOMEM;
	}

	/* Update yielded block info. */
	USE(newyb,
		newyb->id = id;
		newyb->addr = ph->ph->phys;
		newyb->len = ph->ph->length;
		newyb->owner = vmp->vm_endpoint;
		newyb->younger = NULL;);

	/* Set new phys block to new addr and update pagetable. */
	USE(ph->ph,
		ph->ph->phys = CLICK2ABS(mem_clicks););
	if(map_ph_writept(vmp, region, ph) != OK) {
		/* Presumably it was mapped, so there is no reason
		 * updating should fail.
		 */
		panic("yield_block: couldn't write pt");
	}

	/* Remember yielded block. */
	yielded_insert(&vmp->vm_yielded_blocks, newyb);

	/* Add to LRU list too. It's the youngest block. */
	LRUCHECK;

	if(lru_youngest) {
		USE(lru_youngest,
			lru_youngest->younger = newyb;);
	} else {
		lru_oldest = newyb;
	}

	USE(newyb,
		newyb->older = lru_youngest;);

	lru_youngest = newyb;

	LRUCHECK;

	if(retyb)
		*retyb = newyb;

	return OK;
}

/*===========================================================================*
 *				do_forgetblocks				     *
 *===========================================================================*/
PUBLIC int do_forgetblocks(message *m)
{
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yield_block: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT)) {
		printf("do_forgetblocks: no pt\n");
		return EFAULT;
	}

	free_yielded_proc(vmp);

	return OK;
}

/*===========================================================================*
 *				do_forgetblock				     *
 *===========================================================================*/
PUBLIC int do_forgetblock(message *m)
{
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;
	yielded_t *yb;
	u64_t id;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yield_block: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT)) {
		printf("do_forgetblock: no pt\n");
		return EFAULT;
	}

	id = make64(m->VMFB_IDLO, m->VMFB_IDHI);

	if((yb = yielded_search(&vmp->vm_yielded_blocks, id, AVL_EQUAL))) {
		freeyieldednode(yb, 1);
	}

	return OK;
}

/*===========================================================================*
 *				do_yieldblockgetblock			     *
 *===========================================================================*/
PUBLIC int do_yieldblockgetblock(message *m)
{
	u64_t yieldid, getid;
	int n, r = ESRCH;
	endpoint_t caller = m->m_source;
	struct vmproc *vmp;
	yielded_t *yb = NULL;
	size_t len;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yieldblockgetblock: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT)) {
		printf("do_yieldblockgetblock: no pt\n");
		return EFAULT;
	}

	len = m->VMYBGB_LEN;

	if((len % VM_PAGE_SIZE)) {
		len += VM_PAGE_SIZE - len % VM_PAGE_SIZE;
	}

	yieldid = make64(m->VMYBGB_YIELDIDLO, m->VMYBGB_YIELDIDHI);
	getid = make64(m->VMYBGB_GETIDLO, m->VMYBGB_GETIDHI);

	if(cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to yield. */
		yieldblock(vmp, yieldid, (vir_bytes) m->VMYBGB_VADDR, len, &yb);
	}

	if(cmp64(getid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to get. */
		r = getblock(vmp, getid, (vir_bytes) m->VMYBGB_VADDR, len);
	}

	return r;
}