3 #include <minix/callnr.h>
4 #include <minix/type.h>
5 #include <minix/config.h>
6 #include <minix/const.h>
7 #include <minix/sysutil.h>
8 #include <minix/syslib.h>
9 #include <minix/debug.h>
10 #include <minix/bitmap.h>
11 #include <minix/hash.h>
12 #include <machine/multiboot.h>
21 #include <sys/param.h>
28 #include "sanitycheck.h"
29 #include "yieldedavl.h"
34 static yielded_t
*lru_youngest
= NULL
, *lru_oldest
= NULL
;
36 static int map_ph_writept(struct vmproc
*vmp
, struct vir_region
*vr
,
37 struct phys_region
*pr
);
39 static phys_bytes
freeyieldednode(yielded_t
*node
, int freemem
);
41 static struct vir_region
*map_copy_region(struct vmproc
*vmp
, struct
45 static void lrucheck(void);
48 /* hash table of yielded blocks */
49 #define YIELD_HASHSIZE 65536
50 static yielded_avl vm_yielded_blocks
[YIELD_HASHSIZE
];
52 static int avl_inited
= 0;
54 void map_region_init(void)
58 for(h
= 0; h
< YIELD_HASHSIZE
; h
++)
59 yielded_init(&vm_yielded_blocks
[h
]);
63 static yielded_avl
*get_yielded_avl(block_id_t id
)
69 hash_i_64(id
.owner
, id
.id
, h
);
70 h
= h
% YIELD_HASHSIZE
;
73 assert(h
< YIELD_HASHSIZE
);
75 return &vm_yielded_blocks
[h
];
78 void map_printregion(struct vir_region
*vr
)
81 struct phys_region
*ph
;
82 printf("map_printmap: map_name: %s\n", vr
->memtype
->name
);
83 printf("\t%lx (len 0x%lx, %lukB), %p\n",
84 vr
->vaddr
, vr
->length
, vr
->length
/1024, vr
->memtype
->name
);
85 printf("\t\tphysblocks:\n");
86 for(i
= 0; i
< vr
->length
/VM_PAGE_SIZE
; i
++) {
87 if(!(ph
=vr
->physblocks
[i
])) continue;
88 printf("\t\t@ %lx (refs %d): phys 0x%lx\n",
89 (vr
->vaddr
+ ph
->offset
),
90 ph
->ph
->refcount
, ph
->ph
->phys
);
94 struct phys_region
*physblock_get(struct vir_region
*region
, vir_bytes offset
)
97 struct phys_region
*foundregion
;
98 assert(!(offset
% VM_PAGE_SIZE
));
99 assert(offset
>= 0 && offset
< region
->length
);
100 i
= offset
/VM_PAGE_SIZE
;
101 if((foundregion
= region
->physblocks
[i
]))
102 assert(foundregion
->offset
== offset
);
106 void physblock_set(struct vir_region
*region
, vir_bytes offset
,
107 struct phys_region
*newphysr
)
110 assert(!(offset
% VM_PAGE_SIZE
));
111 assert(offset
>= 0 && offset
< region
->length
);
112 i
= offset
/VM_PAGE_SIZE
;
114 assert(!region
->physblocks
[i
]);
115 assert(newphysr
->offset
== offset
);
117 assert(region
->physblocks
[i
]);
119 region
->physblocks
[i
] = newphysr
;
/* NOTE(review): extraction artifact — statements are split across lines and
 * several original lines are missing (braces, the iterator declaration, and
 * the loop-body statement that prints each region). Text preserved
 * byte-identical; comments only. Restore from upstream before compiling. */
/* map_printmap: debug aid — walk all memory regions of process *vmp. */
122 /*===========================================================================*
124 *===========================================================================*/
125 void map_printmap(vmp
)
128 struct vir_region
*vr
;
131 printf("memory regions in process %d:\n", vmp
->vm_endpoint
);
133 region_start_iter_least(&vmp
->vm_regions_avl
, &iter
);
134 while((vr
= region_get_iter(&iter
))) {
136 region_incr_iter(&iter
);
140 static struct vir_region
*getnextvr(struct vir_region
*vr
)
142 struct vir_region
*nextvr
;
145 region_start_iter(&vr
->parent
->vm_regions_avl
, &v_iter
, vr
->vaddr
, AVL_EQUAL
);
146 assert(region_get_iter(&v_iter
));
147 assert(region_get_iter(&v_iter
) == vr
);
148 region_incr_iter(&v_iter
);
149 nextvr
= region_get_iter(&v_iter
);
150 if(!nextvr
) return NULL
;
152 assert(vr
->parent
== nextvr
->parent
);
153 assert(vr
->vaddr
< nextvr
->vaddr
);
154 assert(vr
->vaddr
+ vr
->length
<= nextvr
->vaddr
);
158 int pr_writable(struct vir_region
*vr
, struct phys_region
*pr
)
160 assert(vr
->memtype
->writable
);
161 return ((vr
->flags
& VR_WRITABLE
) && vr
->memtype
->writable(pr
));
/* NOTE(review): extraction artifact — lines missing (the rw selection based
 * on pr_writable(), the result check, function braces and return). Text
 * preserved byte-identical; comments only. */
/* map_sanitycheck_pt: verify (WMF_VERIFY) that the page table entry for one
 * phys_region matches what VM believes should be mapped there. */
166 /*===========================================================================*
167 * map_sanitycheck_pt *
168 *===========================================================================*/
169 static int map_sanitycheck_pt(struct vmproc
*vmp
,
170 struct vir_region
*vr
, struct phys_region
*pr
)
172 struct phys_block
*pb
= pr
->ph
;
176 if(pr_writable(vr
, pr
))
181 r
= pt_writemap(vmp
, &vmp
->vm_pt
, vr
->vaddr
+ pr
->offset
,
182 pb
->phys
, VM_PAGE_SIZE
, PTF_PRESENT
| PTF_USER
| rw
, WMF_VERIFY
);
185 printf("proc %d phys_region 0x%lx sanity check failed\n",
186 vmp
->vm_endpoint
, pr
->offset
);
/* NOTE(review): extraction artifact — this whole-address-space consistency
 * checker is macro-heavy and many lines are missing (macro closers, local
 * declarations, braces). Text preserved byte-identical; comments only. */
/* map_sanitycheck: iterate every phys block of every region of every
 * process (via ALLREGIONS) and verify slab sanity, parent links, refcount
 * vs. seencount bookkeeping, region ordering, and page-table contents. */
193 /*===========================================================================*
195 *===========================================================================*/
196 void map_sanitycheck(char *file
, int line
)
202 /* Macro for looping over all physical blocks of all regions of
205 #define ALLREGIONS(regioncode, physcode) \
206 for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) { \
208 region_iter v_iter; \
209 struct vir_region *vr; \
210 if(!(vmp->vm_flags & VMF_INUSE)) \
212 region_start_iter_least(&vmp->vm_regions_avl, &v_iter); \
213 while((vr = region_get_iter(&v_iter))) { \
214 struct phys_region *pr; \
216 for(voffset = 0; voffset < vr->length; \
217 voffset += VM_PAGE_SIZE) { \
218 if(!(pr = physblock_get(vr, voffset))) \
222 region_incr_iter(&v_iter); \
226 #define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
227 /* Basic pointers check. */
228 ALLREGIONS(MYSLABSANE(vr
),MYSLABSANE(pr
); MYSLABSANE(pr
->ph
);MYSLABSANE(pr
->parent
));
229 ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr
->parent
== vr
););
231 /* Do counting for consistency check. */
232 ALLREGIONS(;,USE(pr
->ph
, pr
->ph
->seencount
= 0;););
233 ALLREGIONS(;,MYASSERT(pr
->offset
== voffset
););
234 ALLREGIONS(;,USE(pr
->ph
, pr
->ph
->seencount
++;);
235 if(pr
->ph
->seencount
== 1) {
236 if(pr
->parent
->memtype
->ev_sanitycheck
)
237 pr
->parent
->memtype
->ev_sanitycheck(pr
, file
, line
);
241 /* Do consistency check. */
242 ALLREGIONS({ struct vir_region
*nextvr
= getnextvr(vr
);
244 MYASSERT(vr
->vaddr
< nextvr
->vaddr
);
245 MYASSERT(vr
->vaddr
+ vr
->length
<= nextvr
->vaddr
);
248 MYASSERT(!(vr
->vaddr
% VM_PAGE_SIZE
));,
249 if(pr
->ph
->refcount
!= pr
->ph
->seencount
) {
251 printf("ph in vr %p: 0x%lx refcount %u "
252 "but seencount %u\n",
254 pr
->ph
->refcount
, pr
->ph
->seencount
);
258 struct phys_region
*others
;
259 if(pr
->ph
->refcount
> 0) {
260 MYASSERT(pr
->ph
->firstregion
);
261 if(pr
->ph
->refcount
== 1) {
262 MYASSERT(pr
->ph
->firstregion
== pr
);
265 MYASSERT(!pr
->ph
->firstregion
);
267 for(others
= pr
->ph
->firstregion
; others
;
268 others
= others
->next_ph_list
) {
270 MYASSERT(others
->ph
== pr
->ph
);
273 MYASSERT(pr
->ph
->refcount
== n_others
);
275 MYASSERT(pr
->ph
->refcount
== pr
->ph
->seencount
);
276 MYASSERT(!(pr
->offset
% VM_PAGE_SIZE
)););
277 ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp
, vr
, pr
) == OK
));
/* NOTE(review): extraction artifact — missing lines include the 'list'
 * declaration, several if() headers around the younger/older checks, and
 * braces. Text preserved byte-identical; comments only. */
/* lrucheck: verify the doubly-linked LRU list of yielded blocks is
 * consistent: both ends NULL together, and each node's younger/older
 * links mutually agree. */
280 #define LRUCHECK lrucheck()
282 static void lrucheck(void)
286 /* list is empty and ok if both ends point to null. */
287 if(!lru_youngest
&& !lru_oldest
)
290 /* if not, both should point to something. */
291 SLABSANE(lru_youngest
);
292 SLABSANE(lru_oldest
);
294 assert(!lru_youngest
->younger
);
295 assert(!lru_oldest
->older
);
297 for(list
= lru_youngest
; list
; list
= list
->older
) {
300 SLABSANE(list
->younger
);
301 assert(list
->younger
->older
== list
);
302 } else assert(list
== lru_youngest
);
304 SLABSANE(list
->older
);
305 assert(list
->older
->younger
== list
);
306 } else assert(list
== lru_oldest
);
/* blockstats: periodically print yielded-block count and memory usage. */
310 void blockstats(void)
318 s
= getuptime(&ticks
);
324 for(list
= lru_youngest
; list
; list
= list
->older
) {
330 printf("%d blocks, %lukB; ", blocks
, mem
/1024);
/* NOTE(review): extraction artifact — missing lines include the rw
 * (PTF_WRITE vs PTF_READ) selection after the pr_writable() test, error
 * returns, braces, and the final return. Text preserved byte-identical;
 * comments only. */
/* map_ph_writept: write one phys_region's mapping into the process's page
 * table (WMF_OVERWRITE) and mark the phys_region as written. */
339 /*=========================================================================*
341 *=========================================================================*/
342 static int map_ph_writept(struct vmproc
*vmp
, struct vir_region
*vr
,
343 struct phys_region
*pr
)
346 struct phys_block
*pb
= pr
->ph
;
352 assert(!(vr
->vaddr
% VM_PAGE_SIZE
));
353 assert(!(pr
->offset
% VM_PAGE_SIZE
));
354 assert(pb
->refcount
> 0);
356 if(pr_writable(vr
, pr
))
361 if(pt_writemap(vmp
, &vmp
->vm_pt
, vr
->vaddr
+ pr
->offset
,
362 pb
->phys
, VM_PAGE_SIZE
, PTF_PRESENT
| PTF_USER
| rw
,
366 WMF_OVERWRITE
) != OK
) {
367 printf("VM: map_writept: pt_writemap failed\n");
372 USE(pr
, pr
->written
= 1;);
/* Sentinel returned by the slot finders below when no gap fits. */
378 #define SLOT_FAIL ((vir_bytes) -1)
/* NOTE(review): extraction artifact — missing lines include local
 * declarations (iter, foundflag, voffset), several printf argument lines,
 * error returns (SLOT_FAIL paths), macro closers and the final return of
 * startv. Text preserved byte-identical; comments only. */
/* region_find_slot_range: find a free virtual-address gap of 'length'
 * bytes within [minv, maxv) by walking the region AVL downward from maxv,
 * trying each inter-region gap (FREEVRANGE prefers leaving a guard page).
 * On success records the position in vm_region_top as a hint. */
380 /*===========================================================================*
381 * region_find_slot_range *
382 *===========================================================================*/
383 static vir_bytes
region_find_slot_range(struct vmproc
*vmp
,
384 vir_bytes minv
, vir_bytes maxv
, vir_bytes length
)
386 struct vir_region
*lastregion
;
387 vir_bytes startv
= 0;
391 SANITYCHECK(SCL_FUNCTIONS
);
393 /* Length must be reasonable. */
396 /* Special case: allow caller to set maxv to 0 meaning 'I want
397 * it to be mapped in right here.'
400 maxv
= minv
+ length
;
404 printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
410 /* Basic input sanity checks. */
411 assert(!(length
% VM_PAGE_SIZE
));
413 printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
419 if(minv
+ length
> maxv
)
422 #define FREEVRANGE_TRY(rangestart, rangeend) { \
423 vir_bytes frstart = (rangestart), frend = (rangeend); \
424 frstart = MAX(frstart, minv); \
425 frend = MIN(frend, maxv); \
426 if(frend > frstart && (frend - frstart) >= length) { \
427 startv = frend-length; \
431 #define FREEVRANGE(start, end) { \
432 assert(!foundflag); \
433 FREEVRANGE_TRY(((start)+VM_PAGE_SIZE), ((end)-VM_PAGE_SIZE)); \
435 FREEVRANGE_TRY((start), (end)); \
439 /* find region after maxv. */
440 region_start_iter(&vmp
->vm_regions_avl
, &iter
, maxv
, AVL_GREATER_EQUAL
);
441 lastregion
= region_get_iter(&iter
);
444 /* This is the free virtual address space after the last region. */
445 region_start_iter(&vmp
->vm_regions_avl
, &iter
, maxv
, AVL_LESS
);
446 lastregion
= region_get_iter(&iter
);
447 FREEVRANGE(lastregion
?
448 lastregion
->vaddr
+lastregion
->length
: 0, VM_DATATOP
);
452 struct vir_region
*vr
;
453 while((vr
= region_get_iter(&iter
)) && !foundflag
) {
454 struct vir_region
*nextvr
;
455 region_decr_iter(&iter
);
456 nextvr
= region_get_iter(&iter
);
457 FREEVRANGE(nextvr
? nextvr
->vaddr
+nextvr
->length
: 0,
466 /* However we got it, startv must be in the requested range. */
467 assert(startv
>= minv
);
468 assert(startv
< maxv
);
469 assert(startv
+ length
<= maxv
);
471 /* remember this position as a hint for next time. */
472 vmp
->vm_region_top
= startv
+ length
;
477 /*===========================================================================*
479 *===========================================================================*/
480 static vir_bytes
region_find_slot(struct vmproc
*vmp
,
481 vir_bytes minv
, vir_bytes maxv
, vir_bytes length
)
483 vir_bytes v
, hint
= vmp
->vm_region_top
;
485 /* use the top of the last inserted region as a minv hint if
486 * possible. remember that a zero maxv is a special case.
489 if(maxv
&& hint
< maxv
&& hint
>= minv
) {
490 v
= region_find_slot_range(vmp
, minv
, hint
, length
);
496 return region_find_slot_range(vmp
, minv
, maxv
, length
);
499 static int phys_slot(vir_bytes len
)
501 assert(!(len
% VM_PAGE_SIZE
));
502 return len
/ VM_PAGE_SIZE
;
/* NOTE(review): extraction artifact — missing lines include function braces,
 * error returns (NULL on allocation failure), the opening of the USE(...)
 * slab-write macro that evidently wraps the field assignments (note the
 * stray ';);' after the parent assignment), cleanup on calloc failure, and
 * the final return of newregion. Text preserved byte-identical; comments
 * only. */
/* region_new: allocate and initialize a vir_region of 'length' bytes at
 * 'startv' with the given flags/memtype, plus its phys_region slot array
 * (one slot per page, zeroed via calloc). */
505 struct vir_region
*region_new(struct vmproc
*vmp
, vir_bytes startv
, vir_bytes length
,
506 int flags
, mem_type_t
*memtype
)
508 struct vir_region
*newregion
;
509 struct phys_region
**physregions
;
511 int slots
= phys_slot(length
);
513 if(!(SLABALLOC(newregion
))) {
514 printf("vm: region_new: could not allocate\n");
518 /* Fill in node details. */
520 memset(newregion
, 0, sizeof(*newregion
));
521 newregion
->vaddr
= startv
;
522 newregion
->length
= length
;
523 newregion
->flags
= flags
;
524 newregion
->memtype
= memtype
;
525 newregion
->remaps
= 0;
526 newregion
->id
= id
++;
527 newregion
->lower
= newregion
->higher
= NULL
;
528 newregion
->parent
= vmp
;);
530 if(!(physregions
= calloc(slots
, sizeof(struct phys_region
*)))) {
531 printf("VM: region_new: allocating phys blocks failed\n");
536 USE(newregion
, newregion
->physblocks
= physregions
;);
/* NOTE(review): extraction artifact — this K&R-style definition is missing
 * its parameter declarations, local declarations (startv), error-path
 * returns (NULL), braces, and the final return of newregion. Text preserved
 * byte-identical; comments only. */
/* map_page_region: public entry to create a new mapped region — find a
 * free slot in [minv, maxv), build the region, run the memtype's ev_new
 * hook, optionally prefault all pages (MF_PREALLOC), clear the
 * VR_UNINITIALIZED flag unless requested, and insert into the AVL. */
541 /*===========================================================================*
543 *===========================================================================*/
544 struct vir_region
*map_page_region(vmp
, minv
, maxv
, length
,
545 flags
, mapflags
, memtype
)
554 struct vir_region
*newregion
;
557 assert(!(length
% VM_PAGE_SIZE
));
559 SANITYCHECK(SCL_FUNCTIONS
);
561 startv
= region_find_slot(vmp
, minv
, maxv
, length
);
562 if (startv
== SLOT_FAIL
)
565 /* Now we want a new region. */
566 if(!(newregion
= region_new(vmp
, startv
, length
, flags
, memtype
))) {
567 printf("VM: map_page_region: allocating region failed\n");
571 /* If a new event is specified, invoke it. */
572 if(newregion
->memtype
->ev_new
) {
573 if(newregion
->memtype
->ev_new(newregion
) != OK
) {
574 /* ev_new will have freed and removed the region */
579 if(mapflags
& MF_PREALLOC
) {
580 if(map_handle_memory(vmp
, newregion
, 0, length
, 1) != OK
) {
581 printf("VM: map_page_region: prealloc failed\n");
582 free(newregion
->physblocks
);
584 newregion
->physblocks
= NULL
;);
590 /* Pre-allocations should be uninitialized, but after that it's a
593 USE(newregion
, newregion
->flags
&= ~VR_UNINITIALIZED
;);
596 region_insert(&vmp
->vm_regions_avl
, newregion
);
599 assert(startv
== newregion
->vaddr
);
601 struct vir_region
*nextvr
;
602 if((nextvr
= getnextvr(newregion
))) {
603 assert(newregion
->vaddr
< nextvr
->vaddr
);
608 SANITYCHECK(SCL_FUNCTIONS
);
/* NOTE(review): extraction artifact — missing lines include braces, the
 * 'pb' assignment from pr->ph before the firstregion walk, loop/function
 * closers, 'int r' in map_free, the SLABFREE of the region, and returns.
 * Text preserved byte-identical; comments only. */
/* map_subfree: unreference all physical pages of 'region' in the byte
 * range [start, start+len). The first loop is a (sanity-check) pass over
 * every block's phys-region list; the second does the pb_unreferenced. */
613 /*===========================================================================*
615 *===========================================================================*/
616 static int map_subfree(struct vir_region
*region
,
617 vir_bytes start
, vir_bytes len
)
619 struct phys_region
*pr
;
620 vir_bytes end
= start
+len
;
625 for(voffset
= 0; voffset
< phys_slot(region
->length
);
626 voffset
+= VM_PAGE_SIZE
) {
627 struct phys_region
*others
;
628 struct phys_block
*pb
;
630 if(!(pr
= physblock_get(region
, voffset
)))
635 for(others
= pb
->firstregion
; others
;
636 others
= others
->next_ph_list
) {
637 assert(others
->ph
== pb
);
642 for(voffset
= start
; voffset
< end
; voffset
+=VM_PAGE_SIZE
) {
643 if(!(pr
= physblock_get(region
, voffset
)))
645 assert(pr
->offset
>= start
);
646 assert(pr
->offset
< end
);
647 pb_unreferenced(region
, pr
, 1);
/* map_free: release a whole region — unreference all memory, run the
 * memtype's ev_delete hook, and free the phys-block slot array. */
654 /*===========================================================================*
656 *===========================================================================*/
657 int map_free(struct vir_region
*region
)
661 if((r
=map_subfree(region
, 0, region
->length
)) != OK
) {
662 printf("%d\n", __LINE__
);
666 if(region
->memtype
->ev_delete
)
667 region
->memtype
->ev_delete(region
);
668 free(region
->physblocks
);
669 region
->physblocks
= NULL
;
675 /*===========================================================================*
676 * yielded_block_cmp *
677 *===========================================================================*/
678 int yielded_block_cmp(struct block_id
*id1
, struct block_id
*id2
)
680 if(id1
->owner
< id2
->owner
)
682 if(id1
->owner
> id2
->owner
)
684 return cmp64(id1
->id
, id2
->id
);
/* NOTE(review): extraction artifact — missing lines include local
 * declarations (h, total, iter, yb, next_yb), braces, continue statements,
 * and returns. Text preserved byte-identical; comments only. */
/* free_yielded_proc: free every yielded block owned by *vmp by scanning
 * all hash buckets; freeyieldednode() invalidates the iterator, so it is
 * restarted at the saved successor after each removal. */
688 /*===========================================================================*
689 * free_yielded_proc *
690 *===========================================================================*/
691 static vir_bytes
free_yielded_proc(struct vmproc
*vmp
)
696 SANITYCHECK(SCL_FUNCTIONS
);
698 /* Free associated regions. */
699 for(h
= 0; h
< YIELD_HASHSIZE
&& vmp
->vm_yielded
> 0; h
++) {
702 yielded_avl
*avl
= &vm_yielded_blocks
[h
];
703 yielded_start_iter_least(avl
, &iter
);
704 while((yb
= yielded_get_iter(&iter
))) {
707 yielded_incr_iter(&iter
);
708 if(yb
->id
.owner
!= vmp
->vm_endpoint
)
710 next_yb
= yielded_get_iter(&iter
);
711 total
+= freeyieldednode(yb
, 1);
712 /* the above removal invalidated our iter; restart it
713 * for the node we want to start at.
716 yielded_start_iter(avl
, &iter
, next_yb
->id
, AVL_EQUAL
);
717 assert(yielded_get_iter(&iter
) == next_yb
);
/* freeyieldednode: unlink 'node' from the LRU list (fixing younger/older
 * neighbours or the list ends), remove it from its hash-bucket AVL, drop
 * the owner's vm_yielded count, and optionally free the physical memory. */
725 static phys_bytes
freeyieldednode(yielded_t
*node
, int freemem
)
727 yielded_t
*older
, *younger
, *removed
;
737 younger
= node
->younger
;
742 assert(younger
->older
== node
);
743 USE(younger
, younger
->older
= node
->older
;);
745 assert(node
== lru_youngest
);
746 lru_youngest
= node
->older
;
751 assert(older
->younger
== node
);
752 USE(older
, older
->younger
= node
->younger
;);
754 assert(node
== lru_oldest
);
755 lru_oldest
= node
->younger
;
762 if(vm_isokendpt(node
->id
.owner
, &p
) != OK
)
763 panic("out of date owner of yielded block %d", node
->id
.owner
);
764 avl
= get_yielded_avl(node
->id
);
765 removed
= yielded_remove(avl
, node
->id
);
766 assert(removed
== node
);
767 assert(vmproc
[p
].vm_yielded
> 0);
768 vmproc
[p
].vm_yielded
--;
770 /* Free associated memory if requested. */
773 free_mem(ABS2CLICK(node
->physaddr
), node
->pages
);
/* NOTE(review): extraction artifact — missing lines include local
 * declarations (freed, n, ph), K&R parameter declarations for map_free_proc
 * and map_lookup, braces and returns. Text preserved byte-identical;
 * comments only. */
/* free_yielded: reclaim up to max_bytes of yielded memory, oldest first. */
782 /*========================================================================*
784 *========================================================================*/
785 vir_bytes
free_yielded(vir_bytes max_bytes
)
788 /* PRIVATE yielded_t *lru_youngest = NULL, *lru_oldest = NULL; */
792 while(freed
< max_bytes
&& lru_oldest
) {
793 SLABSANE(lru_oldest
);
794 freed
+= freeyieldednode(lru_oldest
, 1);
/* map_free_proc: tear down all regions of a process, re-init its region
 * AVL, and free its yielded blocks. */
801 /*========================================================================*
803 *========================================================================*/
804 int map_free_proc(vmp
)
807 struct vir_region
*r
;
809 while((r
= region_search_root(&vmp
->vm_regions_avl
))) {
810 SANITYCHECK(SCL_DETAIL
);
814 region_remove(&vmp
->vm_regions_avl
, r
->vaddr
); /* For sanity checks. */
819 SANITYCHECK(SCL_DETAIL
);
822 region_init(&vmp
->vm_regions_avl
);
824 /* Free associated yielded blocks. */
825 free_yielded_proc(vmp
);
827 SANITYCHECK(SCL_FUNCTIONS
);
/* map_lookup: find the region containing virtual address 'offset'; if
 * physr is non-NULL also return the phys_region at that page (or NULL). */
832 /*===========================================================================*
834 *===========================================================================*/
835 struct vir_region
*map_lookup(vmp
, offset
, physr
)
838 struct phys_region
**physr
;
840 struct vir_region
*r
;
842 SANITYCHECK(SCL_FUNCTIONS
);
845 if(!region_search_root(&vmp
->vm_regions_avl
))
846 panic("process has no regions: %d", vmp
->vm_endpoint
);
849 if((r
= region_search(&vmp
->vm_regions_avl
, offset
, AVL_LESS_EQUAL
))) {
851 if(offset
>= r
->vaddr
&& offset
< r
->vaddr
+ r
->length
) {
852 ph
= offset
- r
->vaddr
;
854 *physr
= physblock_get(r
, ph
);
855 if(*physr
) assert((*physr
)->offset
== ph
);
861 SANITYCHECK(SCL_FUNCTIONS
);
866 u32_t
vrallocflags(u32_t flags
)
868 u32_t allocflags
= 0;
870 if(flags
& VR_PHYS64K
)
871 allocflags
|= PAF_ALIGN64K
;
872 if(flags
& VR_LOWER16MB
)
873 allocflags
|= PAF_LOWER16MB
;
874 if(flags
& VR_LOWER1MB
)
875 allocflags
|= PAF_LOWER1MB
;
876 if(flags
& VR_CONTIG
)
877 allocflags
|= PAF_CONTIG
;
878 if(!(flags
& VR_UNINITIALIZED
))
879 allocflags
|= PAF_CLEAR
;
/* NOTE(review): extraction artifact — missing lines include K&R parameter
 * declarations, local declarations (physaddr, offset, allocflags, r),
 * error returns (NULL / error codes), braces, and final returns. Text
 * preserved byte-identical; comments only. */
/* map_clone_ph_block: replace a shared (refcount > 1) physical block at
 * 'ph' with a private copy: unreference the original, fault in fresh
 * memory via map_pf(), then copy the old contents with copy_abs2region().
 * WARNING (from the original text): invalidates 'ph' and any phys_region
 * iterators on success. */
884 /*===========================================================================*
885 * map_clone_ph_block *
886 *===========================================================================*/
887 struct phys_region
*map_clone_ph_block(vmp
, region
, ph
)
889 struct vir_region
*region
;
890 struct phys_region
*ph
;
895 struct phys_region
*newpr
;
896 int region_has_single_block
;
897 SANITYCHECK(SCL_FUNCTIONS
);
899 /* Warning: this function will free the passed
900 * phys_region *ph and replace it (in the same offset)
901 * with another! So both the pointer to it
902 * and any iterators over the phys_regions in the vir_region
903 * will be invalid on successful return. (Iterators over
904 * the vir_region could be invalid on unsuccessful return too.)
907 /* This is only to be done if there is more than one copy. */
908 assert(ph
->ph
->refcount
> 1);
910 /* This function takes a physical block, copies its contents
911 * into newly allocated memory, and replaces the single physical
912 * block by one or more physical blocks with refcount 1 with the
913 * same contents as the original. In other words, a fragmentable
914 * version of map_copy_ph_block().
917 /* Remember where and how much. */
919 physaddr
= ph
->ph
->phys
;
921 /* Now unlink the original physical block so we can replace
927 assert(ph
->ph
->refcount
> 1);
928 pb_unreferenced(region
, ph
, 1);
931 SANITYCHECK(SCL_DETAIL
);
933 /* Put new free memory in. */
934 allocflags
= vrallocflags(region
->flags
| VR_UNINITIALIZED
);
935 region_has_single_block
= (offset
== 0 && region
->length
== VM_PAGE_SIZE
);
936 assert(region_has_single_block
|| !(allocflags
& PAF_CONTIG
));
937 assert(!(allocflags
& PAF_CLEAR
));
939 if(map_pf(vmp
, region
, offset
, 1) != OK
) {
940 /* XXX original range now gone. */
941 printf("VM: map_clone_ph_block: map_pf failed.\n");
945 /* Copy the block to the new memory.
946 * Can only fail if map_new_physblock didn't do what we asked.
948 if(copy_abs2region(physaddr
, region
, offset
, VM_PAGE_SIZE
) != OK
)
949 panic("copy_abs2region failed, no good reason for that");
951 newpr
= physblock_get(region
, offset
);
953 assert(newpr
->offset
== offset
);
955 SANITYCHECK(SCL_FUNCTIONS
);
/* map_pf: page-fault handler for one page of a region — create and
 * reference a phys_block if none exists, invoke the memtype's
 * ev_pagefault hook when needed, then write the page-table entry and
 * verify it with pt_checkrange. */
961 /*===========================================================================*
963 *===========================================================================*/
964 int map_pf(vmp
, region
, offset
, write
)
966 struct vir_region
*region
;
970 struct phys_region
*ph
;
973 offset
-= offset
% VM_PAGE_SIZE
;
976 assert(offset
< region
->length
);
978 assert(!(region
->vaddr
% VM_PAGE_SIZE
));
979 assert(!(write
&& !(region
->flags
& VR_WRITABLE
)));
981 SANITYCHECK(SCL_FUNCTIONS
);
983 if(!(ph
= physblock_get(region
, offset
))) {
984 struct phys_block
*pb
;
988 if(!(pb
= pb_new(MAP_NONE
))) {
989 printf("map_pf: pb_new failed\n");
993 if(!(ph
= pb_reference(pb
, offset
, region
))) {
994 printf("map_pf: pb_reference failed\n");
1003 /* If we're writing and the block is already
1004 * writable, nothing to do.
1007 assert(region
->memtype
->writable
);
1009 if(!write
|| !region
->memtype
->writable(ph
)) {
1010 assert(region
->memtype
->ev_pagefault
);
1013 if((r
= region
->memtype
->ev_pagefault(vmp
,
1014 region
, ph
, write
)) == SUSPEND
) {
1015 panic("map_pf: memtype->ev_pagefault returned SUSPEND\n");
1020 printf("map_pf: memtype->ev_pagefault failed\n");
1022 pb_unreferenced(region
, ph
, 1);
1032 if((r
= map_ph_writept(vmp
, region
, ph
)) != OK
) {
1033 printf("map_pf: writept failed\n");
1037 SANITYCHECK(SCL_FUNCTIONS
);
1040 if(OK
!= pt_checkrange(&vmp
->vm_pt
, region
->vaddr
+offset
,
1041 VM_PAGE_SIZE
, write
)) {
1042 panic("map_pf: pt_checkrange failed: %d", r
);
1049 int map_handle_memory(vmp
, region
, start_offset
, length
, write
)
1051 struct vir_region
*region
;
1052 vir_bytes start_offset
;
1056 vir_bytes offset
, lim
;
1060 lim
= start_offset
+ length
;
1061 assert(lim
> start_offset
);
1063 for(offset
= start_offset
; offset
< lim
; offset
+= VM_PAGE_SIZE
)
1064 if((r
= map_pf(vmp
, region
, offset
, write
)) != OK
)
1070 /*===========================================================================*
1072 *===========================================================================*/
1073 int map_pin_memory(struct vmproc
*vmp
)
1075 struct vir_region
*vr
;
1078 region_start_iter_least(&vmp
->vm_regions_avl
, &iter
);
1079 /* Scan all memory regions. */
1080 while((vr
= region_get_iter(&iter
))) {
1081 /* Make sure region is mapped to physical memory and writable.*/
1082 r
= map_handle_memory(vmp
, vr
, 0, vr
->length
, 1);
1084 panic("map_pin_memory: map_handle_memory failed: %d", r
);
1086 region_incr_iter(&iter
);
/* NOTE(review): extraction artifact — missing lines include local
 * declarations (p, r, cr, len), braces, error returns, the while loop head
 * of copy_abs2region (len decrement / abs,offset advance), and final
 * returns. Text preserved byte-identical; comments only. */
/* map_copy_region: duplicate a vir_region "in limbo" — same phys_blocks
 * linked in via pb_reference, but not yet attached to a process and with
 * refcounts left for the caller to settle (per the original comment). */
1091 /*===========================================================================*
1093 *===========================================================================*/
1094 static struct vir_region
*map_copy_region(struct vmproc
*vmp
, struct vir_region
*vr
)
1096 /* map_copy_region creates a complete copy of the vir_region
1097 * data structure, linking in the same phys_blocks directly,
1098 * but all in limbo, i.e., the caller has to link the vir_region
1099 * to a process. Therefore it doesn't increase the refcount in
1100 * the phys_block; the caller has to do this once it's linked.
1101 * The reason for this is to keep the sanity checks working
1102 * within this function.
1104 struct vir_region
*newvr
;
1105 struct phys_region
*ph
;
1109 cr
= physregions(vr
);
1113 if(!(newvr
= region_new(vr
->parent
, vr
->vaddr
, vr
->length
, vr
->flags
, vr
->memtype
)))
1116 if(vr
->memtype
->ev_copy
&& (r
=vr
->memtype
->ev_copy(vr
, newvr
)) != OK
) {
1118 printf("VM: memtype-specific copy failed (%d)\n", r
);
1122 for(p
= 0; p
< phys_slot(vr
->length
); p
++) {
1123 if(!(ph
= physblock_get(vr
, p
*VM_PAGE_SIZE
))) continue;
1124 struct phys_region
*newph
= pb_reference(ph
->ph
, ph
->offset
, newvr
);
1126 if(!newph
) { map_free(newvr
); return NULL
; }
1129 USE(newph
, newph
->written
= 0;);
1130 assert(physregions(vr
) == cr
);
1135 assert(physregions(vr
) == physregions(newvr
));
/* copy_abs2region: copy 'len' bytes from absolute physical address 'abs'
 * into 'destregion' starting at 'offset', page fragment by page fragment;
 * each target block must be private (refcount 1). */
1141 /*===========================================================================*
1143 *===========================================================================*/
1144 int copy_abs2region(phys_bytes abs
, struct vir_region
*destregion
,
1145 phys_bytes offset
, phys_bytes len
)
1149 assert(destregion
->physblocks
);
1151 phys_bytes sublen
, suboffset
;
1152 struct phys_region
*ph
;
1154 assert(destregion
->physblocks
);
1155 if(!(ph
= physblock_get(destregion
, offset
))) {
1156 printf("VM: copy_abs2region: no phys region found (1).\n");
1159 assert(ph
->offset
<= offset
);
1160 if(ph
->offset
+VM_PAGE_SIZE
<= offset
) {
1161 printf("VM: copy_abs2region: no phys region found (2).\n");
1164 suboffset
= offset
- ph
->offset
;
1165 assert(suboffset
< VM_PAGE_SIZE
);
1167 if(sublen
> VM_PAGE_SIZE
- suboffset
)
1168 sublen
= VM_PAGE_SIZE
- suboffset
;
1169 assert(suboffset
+ sublen
<= VM_PAGE_SIZE
);
1170 if(ph
->ph
->refcount
!= 1) {
1171 printf("VM: copy_abs2region: refcount not 1.\n");
1175 if(sys_abscopy(abs
, ph
->ph
->phys
+ suboffset
, sublen
) != OK
) {
1176 printf("VM: copy_abs2region: abscopy failed.\n");
/* map_writept: (re)write page-table entries for every present page of
 * every region of *vmp via map_ph_writept. */
1187 /*=========================================================================*
1189 *=========================================================================*/
1190 int map_writept(struct vmproc
*vmp
)
1192 struct vir_region
*vr
;
1193 struct phys_region
*ph
;
1196 region_start_iter_least(&vmp
->vm_regions_avl
, &v_iter
);
1198 while((vr
= region_get_iter(&v_iter
))) {
1200 for(p
= 0; p
< vr
->length
; p
+= VM_PAGE_SIZE
) {
1201 if(!(ph
= physblock_get(vr
, p
))) continue;
1203 if((r
=map_ph_writept(vmp
, vr
, ph
)) != OK
) {
1204 printf("VM: map_writept: failed\n");
1208 region_incr_iter(&v_iter
);
1214 /*========================================================================*
1216 *========================================================================*/
1217 int map_proc_copy(dst
, src
)
1221 /* Copy all the memory regions from the src process to the dst process. */
1222 region_init(&dst
->vm_regions_avl
);
1224 return map_proc_copy_from(dst
, src
, NULL
);
/* NOTE(review): extraction artifact — missing lines include K&R parameter
 * declarations for dst/src, local declarations (v_iter, vaddr), the NULL
 * handling of start_src_vr, error cleanup in the copy-failure branch,
 * braces, and the final return. Text preserved byte-identical; comments
 * only. */
/* map_proc_copy_from: clone src's regions into dst starting at
 * start_src_vr (or src's least region), verifying that each copy shares
 * the same phys_blocks at the same offsets as the original. */
1227 /*========================================================================*
1228 * map_proc_copy_from *
1229 *========================================================================*/
1230 int map_proc_copy_from(dst
, src
, start_src_vr
)
1233 struct vir_region
*start_src_vr
;
1235 struct vir_region
*vr
;
1239 start_src_vr
= region_search_least(&src
->vm_regions_avl
);
1241 assert(start_src_vr
);
1242 assert(start_src_vr
->parent
== src
);
1243 region_start_iter(&src
->vm_regions_avl
, &v_iter
,
1244 start_src_vr
->vaddr
, AVL_EQUAL
);
1245 assert(region_get_iter(&v_iter
) == start_src_vr
);
1247 /* Copy source regions after the destination's last region (if any). */
1249 SANITYCHECK(SCL_FUNCTIONS
);
1251 while((vr
= region_get_iter(&v_iter
))) {
1252 struct vir_region
*newvr
;
1253 if(!(newvr
= map_copy_region(dst
, vr
))) {
1257 USE(newvr
, newvr
->parent
= dst
;);
1258 region_insert(&dst
->vm_regions_avl
, newvr
);
1259 assert(vr
->length
== newvr
->length
);
1264 struct phys_region
*orig_ph
, *new_ph
;
1265 assert(vr
->physblocks
!= newvr
->physblocks
);
1266 for(vaddr
= 0; vaddr
< vr
->length
; vaddr
+= VM_PAGE_SIZE
) {
1267 orig_ph
= physblock_get(vr
, vaddr
);
1268 new_ph
= physblock_get(newvr
, vaddr
);
1269 if(!orig_ph
) { assert(!new_ph
); continue;}
1271 assert(orig_ph
!= new_ph
);
1272 assert(orig_ph
->ph
== new_ph
->ph
);
1276 region_incr_iter(&v_iter
);
1282 SANITYCHECK(SCL_FUNCTIONS
);
/* NOTE(review): extraction artifact — missing lines include error return
 * values (the specific codes after the printf diagnostics are not visible),
 * the shrink-from-start branch header in map_unmap_region, local
 * declarations (voffset, remslots), braces, and final returns. Text
 * preserved byte-identical; comments only. */
/* map_region_extend_upto_v: grow the region below page-rounded address v
 * by enlarging its phys-block slot array (realloc + zero the new slots)
 * and delegating the actual growth to the memtype's ev_resize hook. */
1286 int map_region_extend_upto_v(struct vmproc
*vmp
, vir_bytes v
)
1288 vir_bytes offset
= v
;
1289 struct vir_region
*vr
, *nextvr
;
1290 struct phys_region
**newpr
;
1291 int newslots
, prevslots
, addedslots
;
1293 offset
= roundup(offset
, VM_PAGE_SIZE
);
1295 if(!(vr
= region_search(&vmp
->vm_regions_avl
, offset
, AVL_LESS
))) {
1296 printf("VM: nothing to extend\n");
1300 if(vr
->vaddr
+ vr
->length
>= v
) return OK
;
1302 assert(vr
->vaddr
<= offset
);
1303 newslots
= phys_slot(offset
- vr
->vaddr
);
1304 prevslots
= phys_slot(vr
->length
);
1305 assert(newslots
>= prevslots
);
1306 addedslots
= newslots
- prevslots
;
1308 if(!(newpr
= realloc(vr
->physblocks
,
1309 newslots
* sizeof(struct phys_region
*)))) {
1310 printf("VM: map_region_extend_upto_v: realloc failed\n");
1314 vr
->physblocks
= newpr
;
1315 memset(vr
->physblocks
+ prevslots
, 0,
1316 addedslots
* sizeof(struct phys_region
*));
1318 if((nextvr
= getnextvr(vr
))) {
1319 assert(offset
<= nextvr
->vaddr
);
1322 if(nextvr
&& nextvr
->vaddr
< offset
) {
1323 printf("VM: can't grow into next region\n");
1327 if(!vr
->memtype
->ev_resize
) {
1328 printf("VM: can't resize this type of memory\n");
1332 return vr
->memtype
->ev_resize(vmp
, vr
, offset
- vr
->vaddr
);
/* map_unmap_region: shrink region r by unreferencing [offset, offset+len)
 * and, when the cut is at the region's start, sliding remaining
 * phys_regions down (offset -= len, memmove of the slot array) after
 * re-keying the region in the AVL; finally clear the page-table range. */
1335 /*========================================================================*
1336 * map_unmap_region *
1337 *========================================================================*/
1338 int map_unmap_region(struct vmproc
*vmp
, struct vir_region
*r
,
1339 vir_bytes offset
, vir_bytes len
)
1341 /* Shrink the region by 'len' bytes, from the start. Unreference
1342 * memory it used to reference if any.
1344 vir_bytes regionstart
;
1345 int freeslots
= phys_slot(len
);
1347 SANITYCHECK(SCL_FUNCTIONS
);
1349 if(offset
+len
> r
->length
|| (len
% VM_PAGE_SIZE
)) {
1350 printf("VM: bogus length 0x%lx\n", len
);
1354 regionstart
= r
->vaddr
+ offset
;
1356 /* unreference its memory */
1357 map_subfree(r
, offset
, len
);
1359 /* if unmap was at start/end of this region, it actually shrinks */
1361 struct phys_region
*pr
;
1365 region_remove(&vmp
->vm_regions_avl
, r
->vaddr
);
1371 remslots
= phys_slot(r
->length
);
1373 region_insert(&vmp
->vm_regions_avl
, r
);
1375 /* vaddr has increased; to make all the phys_regions
1376 * point to the same addresses, make them shrink by the
1379 for(voffset
= offset
; voffset
< r
->length
;
1380 voffset
+= VM_PAGE_SIZE
) {
1381 if(!(pr
= physblock_get(r
, voffset
))) continue;
1382 assert(pr
->offset
>= offset
);
1383 USE(pr
, pr
->offset
-= len
;);
1386 memmove(r
->physblocks
, r
->physblocks
+ freeslots
,
1387 remslots
* sizeof(struct phys_region
*));
1388 } else if(offset
+ len
== r
->length
) {
1389 assert(len
<= r
->length
);
1393 if(r
->length
== 0) {
1394 /* Whole region disappears. Unlink and free it. */
1395 region_remove(&vmp
->vm_regions_avl
, r
->vaddr
);
1399 SANITYCHECK(SCL_DETAIL
);
1401 if(pt_writemap(vmp
, &vmp
->vm_pt
, regionstart
,
1402 MAP_NONE
, len
, 0, WMF_OVERWRITE
) != OK
) {
1403 printf("VM: map_unmap_region: pt_writemap failed\n");
1407 SANITYCHECK(SCL_FUNCTIONS
);
1412 /*========================================================================*
1414 *========================================================================*/
/* map_get_phys: look up the region that starts exactly at 'addr' in
 * process 'vmp' and store an identifier for its backing memory in *r,
 * obtained through the region's memtype->regionid hook.
 * NOTE(review): extraction dropped several original lines here (braces
 * and the early-return/OK-return statements); code text is kept
 * verbatim below — verify against the repository copy.
 */
1415 int map_get_phys(struct vmproc
*vmp
, vir_bytes addr
, phys_bytes
*r
)
1417 struct vir_region
*vr
;
/* Fail unless a region exists and begins exactly at 'addr'. */
1419 if (!(vr
= map_lookup(vmp
, addr
, NULL
)) ||
1420 (vr
->vaddr
!= addr
))
/* The region's memory type must implement the regionid query. */
1423 if (!vr
->memtype
->regionid
)
1427 *r
= vr
->memtype
->regionid(vr
);
1432 /*========================================================================*
1434 *========================================================================*/
/* map_get_ref: for the region starting exactly at 'addr' in 'vmp',
 * store the memory type's reference count in *cnt via the
 * memtype->refcount hook.  Fails when no region starts at 'addr' or
 * the memtype provides no refcount callback.
 * NOTE(review): return statements were lost in extraction; code text
 * kept verbatim.
 */
1435 int map_get_ref(struct vmproc
*vmp
, vir_bytes addr
, u8_t
*cnt
)
1437 struct vir_region
*vr
;
/* Region must exist, start exactly at 'addr', and support refcount. */
1439 if (!(vr
= map_lookup(vmp
, addr
, NULL
)) ||
1440 (vr
->vaddr
!= addr
) || !vr
->memtype
->refcount
)
1444 *cnt
= vr
->memtype
->refcount(vr
);
1449 /*========================================================================*
1451 *========================================================================*/
/* get_stats_info: fill in VM-wide statistics.  The visible part zeroes
 * the cached-pages counter and walks the yielded-block LRU list from
 * youngest (lru_youngest) towards oldest via the ->older links.
 * NOTE(review): the loop body was lost in extraction — presumably it
 * accumulates each yielded block's page count into vsi->vsi_cached;
 * confirm against the repository copy.
 */
1452 void get_stats_info(struct vm_stats_info
*vsi
)
1456 vsi
->vsi_cached
= 0L;
1458 for(yb
= lru_youngest
; yb
; yb
= yb
->older
)
/* get_usage_info_kernel: usage accounting for the kernel "process":
 * zero every field, then report the bytes the kernel allocated at boot
 * (kernel_boot_info.kernel_allocated_bytes) as the total.
 */
1462 void get_usage_info_kernel(struct vm_usage_info
*vui
)
1464 memset(vui
, 0, sizeof(*vui
));
1465 vui
->vui_total
= kernel_boot_info
.kernel_allocated_bytes
;
1468 static void get_usage_info_vm(struct vm_usage_info
*vui
)
1470 memset(vui
, 0, sizeof(*vui
));
1471 vui
->vui_total
= kernel_boot_info
.vm_allocated_bytes
+
1472 get_vm_self_pages() * VM_PAGE_SIZE
;
1475 /*========================================================================*
1477 *========================================================================*/
/* get_usage_info: compute memory-usage statistics for process 'vmp'
 * into *vui.  The kernel and VM itself are special-cased; for ordinary
 * processes every present page in every region is counted.
 * NOTE(review): extraction dropped iterator/offset declarations,
 * closing braces and the returns after the special cases; code text is
 * kept verbatim.
 */
1478 void get_usage_info(struct vmproc
*vmp
, struct vm_usage_info
*vui
)
1480 struct vir_region
*vr
;
1481 struct phys_region
*ph
;
1483 region_start_iter_least(&vmp
->vm_regions_avl
, &v_iter
);
1486 memset(vui
, 0, sizeof(*vui
));
/* VM's own endpoint: delegate and (presumably) return early. */
1488 if(vmp
->vm_endpoint
== VM_PROC_NR
) {
1489 get_usage_info_vm(vui
);
/* Negative endpoints are kernel tasks: delegate likewise. */
1493 if(vmp
->vm_endpoint
< 0) {
1494 get_usage_info_kernel(vui
);
/* Walk all regions, page by page. */
1498 while((vr
= region_get_iter(&v_iter
))) {
1499 for(voffset
= 0; voffset
< vr
->length
; voffset
+= VM_PAGE_SIZE
) {
1500 if(!(ph
= physblock_get(vr
, voffset
))) continue;
1501 /* All present pages are counted towards the total. */
1502 vui
->vui_total
+= VM_PAGE_SIZE
;
1504 if (ph
->ph
->refcount
> 1) {
1505 /* Any page with a refcount > 1 is common. */
1506 vui
->vui_common
+= VM_PAGE_SIZE
;
1508 /* Any common, non-COW page is shared. */
1509 if (vr
->flags
& VR_SHARED
)
1510 vui
->vui_shared
+= VM_PAGE_SIZE
;
1513 region_incr_iter(&v_iter
);
1517 /*===========================================================================*
1519 *===========================================================================*/
1520 int get_region_info(struct vmproc
*vmp
, struct vm_region_info
*vri
,
1521 int max
, vir_bytes
*nextp
)
1523 struct vir_region
*vr
;
1532 region_start_iter(&vmp
->vm_regions_avl
, &v_iter
, next
, AVL_GREATER_EQUAL
);
1533 if(!(vr
= region_get_iter(&v_iter
))) return 0;
1535 for(count
= 0; (vr
= region_get_iter(&v_iter
)) && count
< max
; count
++, vri
++) {
1536 struct phys_region
*ph1
= NULL
, *ph2
= NULL
;
1539 /* Report part of the region that's actually in use. */
1541 /* Get first and last phys_regions, if any */
1542 for(voffset
= 0; voffset
> vr
->length
; voffset
+= VM_PAGE_SIZE
) {
1543 struct phys_region
*ph
;
1544 if(!(ph
= physblock_get(vr
, voffset
))) continue;
1548 if(!ph1
|| !ph2
) { assert(!ph1
&& !ph2
); continue; }
1550 /* Report start+length of region starting from lowest use. */
1551 vri
->vri_addr
= vr
->vaddr
+ ph1
->offset
;
1553 vri
->vri_length
= ph2
->offset
+ VM_PAGE_SIZE
- ph1
->offset
;
1555 /* "AND" the provided protection with per-page protection. */
1556 if (!(vr
->flags
& VR_WRITABLE
))
1557 vri
->vri_prot
&= ~PROT_WRITE
;
1559 next
= vr
->vaddr
+ vr
->length
;
1560 region_incr_iter(&v_iter
);
1567 /*========================================================================*
1568 * regionprintstats *
1569 *========================================================================*/
/* printregionstats: print, for process 'vmp', the amount of memory in
 * use and the same amount weighted by each page's reference count
 * (shared pages count fractionally).  VR_DIRECT regions are excluded
 * (presumably skipped via a dropped 'continue').
 * NOTE(review): iterator/offset declarations and closing braces were
 * lost in extraction; code text kept verbatim.
 */
1570 void printregionstats(struct vmproc
*vmp
)
1572 struct vir_region
*vr
;
1573 struct phys_region
*pr
;
1574 vir_bytes used
= 0, weighted
= 0;
1576 region_start_iter_least(&vmp
->vm_regions_avl
, &v_iter
);
1578 while((vr
= region_get_iter(&v_iter
))) {
1580 region_incr_iter(&v_iter
);
1581 if(vr
->flags
& VR_DIRECT
)
/* Count each present page once, and once weighted by sharing. */
1583 for(voffset
= 0; voffset
< vr
->length
; voffset
+=VM_PAGE_SIZE
) {
1584 if(!(pr
= physblock_get(vr
, voffset
))) continue;
1585 used
+= VM_PAGE_SIZE
;
1586 weighted
+= VM_PAGE_SIZE
/ pr
->ph
->refcount
;
1590 printf("%6lukB %6lukB\n", used
/1024, weighted
/1024);
1595 /*===========================================================================*
1596 * get_clean_phys_region *
1597 *===========================================================================*/
/* get_clean_phys_region: return the phys_region backing 'vaddr' in
 * 'vmp', guaranteeing its physical block is mapped exactly once
 * (cloning it first if refcount > 1), and store the owning region in
 * *ret_region.  Returns NULL (via dropped return) on lookup or clone
 * failure.
 * NOTE(review): the declaration/derivation of 'mapaddr' and the
 * map_clone_ph_block() argument tail were lost in extraction.
 */
1598 static struct phys_region
*
1599 get_clean_phys_region(struct vmproc
*vmp
, vir_bytes vaddr
, struct vir_region
**ret_region
)
1601 struct vir_region
*region
;
1603 struct phys_region
*ph
;
1607 if(!(region
= map_lookup(vmp
, mapaddr
, &ph
)) || !ph
) {
1608 printf("VM: get_clean_phys_region: 0x%lx not found\n", vaddr
);
/* The looked-up address must fall inside the returned region. */
1612 assert(mapaddr
>= region
->vaddr
);
1613 assert(mapaddr
< region
->vaddr
+ region
->length
);
1615 /* If it's mapped more than once, make a copy. */
1616 assert(ph
->ph
->refcount
> 0);
1617 if(ph
->ph
->refcount
> 1) {
1618 if(!(ph
= map_clone_ph_block(vmp
, region
,
1620 printf("VM: get_clean_phys_region: ph copy failed\n");
/* Post-condition: caller gets an exclusively-owned block. */
1625 assert(ph
->ph
->refcount
== 1);
1627 *ret_region
= region
;
/* getblock: reclaim a previously yielded block 'id' for process 'vmp'
 * and map its physical pages at 'vaddr' ('pages' pages long).  Each
 * target page is first made exclusively owned, its current frame is
 * freed, then the yielded frame is substituted and the page table
 * updated.  Finally the yielded-block record is discarded (without
 * freeing the memory, which now belongs to the process again).
 * NOTE(review): declarations (yb, avl, blockid, phaddr, p) and error
 * returns were lost in extraction; code text kept verbatim.
 */
1632 static int getblock(struct vmproc
*vmp
, u64_t id
, vir_bytes vaddr
, int pages
)
1635 struct phys_region
*ph
;
1636 struct vir_region
*region
;
1642 /* Try to get the yielded block */
1643 blockid
.owner
= vmp
->vm_endpoint
;
1645 avl
= get_yielded_avl(blockid
);
1646 if(!(yb
= yielded_search(avl
, blockid
, AVL_EQUAL
))) {
/* The caller must ask for exactly the yielded size. */
1650 if(yb
->pages
!= pages
) {
1651 printf("VM: getblock: length mismatch (%d != %d)\n",
1656 phaddr
= yb
->physaddr
;
1658 for(p
= 0; p
< pages
; p
++) {
1659 /* Get the intended phys region, make sure refcount is 1. */
/* NOTE(review): '®ion' below is mojibake for '&region'
 * (HTML-entity corruption); left byte-identical here, must be
 * repaired in the real file.
 */
1660 if(!(ph
= get_clean_phys_region(vmp
, vaddr
, ®ion
))) {
1661 printf("VM: getblock: not found for %d\n", vmp
->vm_endpoint
);
1665 assert(ph
->ph
->refcount
== 1);
1667 /* Free the block that is currently there. */
1668 free_mem(ABS2CLICK(ph
->ph
->phys
), 1);
1670 /* Set the phys block to new addr and update pagetable. */
1671 USE(ph
->ph
, ph
->ph
->phys
= phaddr
;);
1672 if(map_ph_writept(vmp
, region
, ph
) != OK
) {
1673 /* Presumably it was mapped, so there is no reason
1674 * updating should fail.
1676 panic("do_get_block: couldn't write pt");
/* Advance both virtual and physical cursors one page. */
1679 vaddr
+= VM_PAGE_SIZE
;
1680 phaddr
+= VM_PAGE_SIZE
;
1683 /* Forget about the yielded block and free the struct. */
1684 freeyieldednode(yb
, 0);
/* yieldblock: let process 'vmp' yield 'pages' physically contiguous,
 * singly-mapped anonymous pages at 'vaddr' to VM under block id 'id'.
 * The pages are validated, a replacement (zeroed) allocation is
 * substituted into the process, and a yielded_t record is created,
 * inserted into the per-hash AVL and pushed on the LRU list as the
 * youngest block.  *retyb receives the new record (assignment lost in
 * extraction).
 * NOTE(review): many original lines (declarations of blockid/avl/newyb,
 * several error returns, the 'v' cursor updates and closing braces)
 * were dropped by extraction; code text kept verbatim.
 */
1689 static int yieldblock(struct vmproc
*vmp
, u64_t id
,
1690 vir_bytes vaddr
, yielded_t
**retyb
, int pages
)
1693 vir_bytes mem_clicks
, v
, p
, new_phaddr
;
1694 struct vir_region
*region
;
1695 struct phys_region
*ph
= NULL
, *prev_ph
= NULL
, *first_ph
= NULL
;
1699 /* Makes no sense if yielded block ID already exists, and
1700 * is likely a serious bug in the caller.
1703 blockid
.owner
= vmp
->vm_endpoint
;
1704 avl
= get_yielded_avl(blockid
);
1705 if(yielded_search(avl
, blockid
, AVL_EQUAL
)) {
/* Address must be page-aligned and length positive. */
1710 if((vaddr
% VM_PAGE_SIZE
) || pages
< 1) return EFAULT
;
/* First pass: validate every page of the range. */
1713 for(p
= 0; p
< pages
; p
++) {
1714 if(!(region
= map_lookup(vmp
, v
, &ph
)) || !ph
) {
1715 printf("VM: do_yield_block: not found for %d\n",
1719 if(!(region
->flags
& VR_ANON
)) {
1720 printf("VM: yieldblock: non-anon 0x%lx\n", v
);
1723 if(ph
->ph
->refcount
!= 1) {
1724 printf("VM: do_yield_block: mapped not once for %d\n",
/* Pages must be physically contiguous with the previous one. */
1729 if(ph
->ph
->phys
!= prev_ph
->ph
->phys
+ VM_PAGE_SIZE
) {
1730 printf("VM: physically discontiguous yield\n");
1735 if(!first_ph
) first_ph
= ph
;
1739 /* Make a new block to record the yielding in. */
1740 if(!SLABALLOC(newyb
)) {
1744 assert(!(ph
->ph
->phys
% VM_PAGE_SIZE
));
/* Allocate zeroed replacement memory for the process. */
1746 if((mem_clicks
= alloc_mem(pages
, PAF_CLEAR
)) == NO_MEM
) {
1751 /* Update yielded block info. */
1753 newyb
->id
= blockid
;
1754 newyb
->physaddr
= first_ph
->ph
->phys
;
1755 newyb
->pages
= pages
;
1756 newyb
->younger
= NULL
;);
1758 new_phaddr
= CLICK2ABS(mem_clicks
);
1760 /* Set new phys block to new addr and update pagetable. */
1762 for(p
= 0; p
< pages
; p
++) {
1763 region
= map_lookup(vmp
, v
, &ph
);
1764 assert(region
&& ph
);
1765 assert(ph
->ph
->refcount
== 1);
1767 ph
->ph
->phys
= new_phaddr
;);
1768 if(map_ph_writept(vmp
, region
, ph
) != OK
) {
1769 /* Presumably it was mapped, so there is no reason
1770 * updating should fail.
1772 panic("yield_block: couldn't write pt");
1775 new_phaddr
+= VM_PAGE_SIZE
;
1778 /* Remember yielded block. */
1780 yielded_insert(avl
, newyb
);
1783 /* Add to LRU list too. It's the youngest block. */
1788 lru_youngest
->younger
= newyb
;);
1794 newyb
->older
= lru_youngest
;);
1796 lru_youngest
= newyb
;
1806 /*===========================================================================*
1808 *===========================================================================*/
/* do_forgetblocks: message handler — drop ALL yielded blocks owned by
 * the calling process (their memory is freed via free_yielded_proc).
 * NOTE(review): the endpoint-to-slot lookup result ('n' -> vmp) and
 * the OK return were lost in extraction; code text kept verbatim.
 * The panic string says "do_yield_block" — it looks copy-pasted from a
 * sibling handler; consider correcting it in the real file.
 */
1809 int do_forgetblocks(message
*m
)
1813 endpoint_t caller
= m
->m_source
;
1815 if(vm_isokendpt(caller
, &n
) != OK
)
1816 panic("do_yield_block: message from strange source: %d",
1821 free_yielded_proc(vmp
);
1826 /*===========================================================================*
1828 *===========================================================================*/
/* do_forgetblock: message handler — drop ONE yielded block, identified
 * by the 64-bit id in the VMFB_IDLO/VMFB_IDHI message fields, owned by
 * the caller.  If found, the record and its memory are freed
 * (freeyieldednode(..., 1)); a missing block is not an error here.
 * NOTE(review): declarations (id, blockid, avl, yb, n, vmp) and the
 * return were lost in extraction; code text kept verbatim.  The panic
 * string names "do_yield_block" — likely copy-paste, verify.
 */
1829 int do_forgetblock(message
*m
)
1833 endpoint_t caller
= m
->m_source
;
1839 if(vm_isokendpt(caller
, &n
) != OK
)
1840 panic("do_yield_block: message from strange source: %d",
1845 id
= make64(m
->VMFB_IDLO
, m
->VMFB_IDHI
);
1848 blockid
.owner
= vmp
->vm_endpoint
;
1849 avl
= get_yielded_avl(blockid
);
1850 if((yb
= yielded_search(avl
, blockid
, AVL_EQUAL
))) {
1851 freeyieldednode(yb
, 1);
1857 /*===========================================================================*
1858 * do_yieldblockgetblock *
1859 *===========================================================================*/
/* do_yieldblockgetblock: combined message handler — optionally yield
 * the block named by VMYBGB_YIELDID* and/or reclaim the block named by
 * VMYBGB_GETID*, both at vaddr VMYBGB_VADDR with length VMYBGB_LEN
 * (validated to be a positive multiple of VM_PAGE_SIZE).
 * VM_BLOCKID_NONE in either id field skips that half.  Returns the
 * getblock() result 'r' (return statement lost in extraction).
 * NOTE(review): declarations of r/n/vmp/pages, the EFAULT return for
 * bad lengths, and the yieldblock() trailing arguments were dropped by
 * extraction; code text kept verbatim.
 */
1860 int do_yieldblockgetblock(message
*m
)
1862 u64_t yieldid
, getid
;
1864 endpoint_t caller
= m
->m_source
;
1866 yielded_t
*yb
= NULL
;
1870 if(vm_isokendpt(caller
, &n
) != OK
)
1871 panic("do_yieldblockgetblock: message from strange source: %d",
1876 pages
= m
->VMYBGB_LEN
/ VM_PAGE_SIZE
;
1878 if((m
->VMYBGB_LEN
% VM_PAGE_SIZE
) || pages
< 1) {
1882 printf("vm: non-page-aligned or short block length\n");
1887 yieldid
= make64(m
->VMYBGB_YIELDIDLO
, m
->VMYBGB_YIELDIDHI
);
1888 getid
= make64(m
->VMYBGB_GETIDLO
, m
->VMYBGB_GETIDHI
);
1890 if(cmp64(yieldid
, VM_BLOCKID_NONE
) != 0) {
1891 /* A block was given to yield. */
1892 yieldblock(vmp
, yieldid
, (vir_bytes
) m
->VMYBGB_VADDR
, &yb
,
1896 if(cmp64(getid
, VM_BLOCKID_NONE
) != 0) {
1897 /* A block was given to get. */
1898 r
= getblock(vmp
, getid
, (vir_bytes
) m
->VMYBGB_VADDR
, pages
);
/* map_setparent: after a vmproc slot has been copied/moved, walk every
 * region in vmp's AVL and point its 'parent' back at this vmproc.
 * NOTE(review): the iterator variable's declaration was lost in
 * extraction (same iterator type used with region_start_iter_least
 * elsewhere in this file); code text kept verbatim.
 */
1904 void map_setparent(struct vmproc
*vmp
)
1907 struct vir_region
*vr
;
1908 region_start_iter_least(&vmp
->vm_regions_avl
, &iter
);
1909 while((vr
= region_get_iter(&iter
))) {
1910 USE(vr
, vr
->parent
= vmp
;);
1911 region_incr_iter(&iter
);
1915 int physregions(struct vir_region
*vr
)
1919 for(voffset
= 0; voffset
< vr
->length
; voffset
+= VM_PAGE_SIZE
) {
1920 if(physblock_get(vr
, voffset
))