#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>
#include <minix/hash.h>
#include <machine/multiboot.h>

#include <sys/param.h>

#include "sanitycheck.h"
/* LRU list of yielded blocks, from youngest to oldest end. */
static yielded_t *lru_youngest = NULL, *lru_oldest = NULL;
static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr);

static phys_bytes freeyieldednode(yielded_t *node, int freemem);

static struct vir_region *map_copy_region(struct vmproc *vmp, struct
	vir_region *vr);

#if SANITYCHECKS
static void lrucheck(void);
#endif
/* hash table of yielded blocks */
#define YIELD_HASHSIZE 65536
static yielded_avl vm_yielded_blocks[YIELD_HASHSIZE];

static int avl_inited = 0;
void map_region_init(void)
{
	int h;

	assert(!avl_inited);
	for(h = 0; h < YIELD_HASHSIZE; h++)
		yielded_init(&vm_yielded_blocks[h]);
	avl_inited = 1;
}
static yielded_avl *get_yielded_avl(block_id_t id)
{
	u32_t h;

	assert(avl_inited);

	hash_i_64(id.owner, id.id, h);
	h = h % YIELD_HASHSIZE;

	assert(h < YIELD_HASHSIZE);

	return &vm_yielded_blocks[h];
}
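/*
 * Sketch (added, not part of the original file): how a yielded block is
 * found through the hash table above. The bucket index depends only on the
 * (owner, id) pair, so a lookup hashes the same block_id_t it was inserted
 * under and then searches that bucket's AVL tree. The function name
 * example_find_yielded() is hypothetical.
 */
#if 0
static yielded_t *example_find_yielded(endpoint_t owner, u64_t id)
{
	block_id_t blockid;
	yielded_avl *avl;

	blockid.owner = owner;
	blockid.id = id;
	avl = get_yielded_avl(blockid);		/* pick the bucket */
	return yielded_search(avl, blockid, AVL_EQUAL);	/* search its AVL */
}
#endif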
void map_printregion(struct vmproc *vmp, struct vir_region *vr)
{
	physr_iter iter;
	struct phys_region *ph;

	printf("map_printmap: map_name: %s\n", vr->memtype->name);
	printf("\t%lx (len 0x%lx, %lukB), %p\n",
		vr->vaddr, vr->length, vr->length/1024, vr->memtype->name);
	printf("\t\tphysblocks:\n");
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		printf("\t\t@ %lx (refs %d): phys 0x%lx\n",
			(vr->vaddr + ph->offset),
			ph->ph->refcount, ph->ph->phys);
		physr_incr_iter(&iter);
	}
}
/*===========================================================================*
 *				map_printmap				     *
 *===========================================================================*/
void map_printmap(vmp)
struct vmproc *vmp;
{
	struct vir_region *vr;
	region_iter iter;

	printf("memory regions in process %d:\n", vmp->vm_endpoint);

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		map_printregion(vmp, vr);
		region_incr_iter(&iter);
	}
}
static struct vir_region *getnextvr(struct vir_region *vr)
{
	struct vir_region *nextvr;
	region_iter v_iter;

	region_start_iter(&vr->parent->vm_regions_avl, &v_iter, vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter));
	assert(region_get_iter(&v_iter) == vr);
	region_incr_iter(&v_iter);
	nextvr = region_get_iter(&v_iter);
	if(!nextvr) return NULL;

	assert(vr->parent == nextvr->parent);
	assert(vr->vaddr < nextvr->vaddr);
	assert(vr->vaddr + vr->length <= nextvr->vaddr);

	return nextvr;
}
int pr_writable(struct vir_region *vr, struct phys_region *pr)
{
	assert(vr->memtype->writable);
	return ((vr->flags & VR_WRITABLE) && vr->memtype->writable(pr));
}
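/*
 * Note (added): a page is treated as writable only when both checks pass:
 * the region allows writing (VR_WRITABLE) and the region's memory type
 * reports this particular page as currently writable. A copy-on-write page,
 * for instance, would typically fail the second check until it has been
 * copied; the exact behavior is up to each memtype's writable() hook.
 */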
#if SANITYCHECKS

/*===========================================================================*
 *				map_sanitycheck_pt			     *
 *===========================================================================*/
static int map_sanitycheck_pt(struct vmproc *vmp,
	struct vir_region *vr, struct phys_region *pr)
{
	struct phys_block *pb = pr->ph;
	int rw;
	int r;

	if(pr_writable(vr, pr))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	r = pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
		pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw, WMF_VERIFY);

	if(r != OK) {
		printf("proc %d phys_region 0x%lx sanity check failed\n",
			vmp->vm_endpoint, pr->offset);
		map_printregion(vmp, vr);
	}

	return r;
}
/*===========================================================================*
 *				map_sanitycheck				     *
 *===========================================================================*/
void map_sanitycheck(char *file, int line)
{
	struct vmproc *vmp;

	lrucheck();

	/* Macro for looping over all physical blocks of all regions of
	 * all processes.
	 */
#define ALLREGIONS(regioncode, physcode)			\
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) {	\
		region_iter v_iter;				\
		struct vir_region *vr;				\
		if(!(vmp->vm_flags & VMF_INUSE))		\
			continue;				\
		region_start_iter_least(&vmp->vm_regions_avl, &v_iter);	\
		while((vr = region_get_iter(&v_iter))) {	\
			physr_iter iter;			\
			struct phys_region *pr;			\
			regioncode;				\
			physr_start_iter_least(vr->phys, &iter); \
			while((pr = physr_get_iter(&iter))) {	\
				physcode;			\
				physr_incr_iter(&iter);		\
			}					\
			region_incr_iter(&v_iter);		\
		}						\
	}

#define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
	/* Basic pointers check. */
	ALLREGIONS(MYSLABSANE(vr),MYSLABSANE(pr); MYSLABSANE(pr->ph);MYSLABSANE(pr->parent));
	ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr->parent == vr););

	/* Do counting for consistency check. */
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
		if(pr->ph->seencount == 1) {
			if(pr->parent->memtype->ev_sanitycheck)
				pr->parent->memtype->ev_sanitycheck(pr, file, line);
		}
	);

	/* Do consistency check. */
	ALLREGIONS({ struct vir_region *nextvr = getnextvr(vr);
		if(nextvr) {
			MYASSERT(vr->vaddr < nextvr->vaddr);
			MYASSERT(vr->vaddr + vr->length <= nextvr->vaddr);
		} }
		MYASSERT(!(vr->vaddr % VM_PAGE_SIZE));,
		if(pr->ph->refcount != pr->ph->seencount) {
			map_printmap(vmp);
			printf("ph in vr %p: 0x%lx refcount %u "
				"but seencount %u\n",
				vr, pr->offset,
				pr->ph->refcount, pr->ph->seencount);
		}
		{
			int n_others = 0;
			struct phys_region *others;
			if(pr->ph->refcount > 0) {
				MYASSERT(pr->ph->firstregion);
				if(pr->ph->refcount == 1) {
					MYASSERT(pr->ph->firstregion == pr);
				}
			} else {
				MYASSERT(!pr->ph->firstregion);
			}
			for(others = pr->ph->firstregion; others;
				others = others->next_ph_list) {
				MYSLABSANE(others);
				MYASSERT(others->ph == pr->ph);
				n_others++;
			}
			MYASSERT(pr->ph->refcount == n_others);
		}
		MYASSERT(pr->ph->refcount == pr->ph->seencount);
		MYASSERT(!(pr->offset % VM_PAGE_SIZE)););
	ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp, vr, pr) == OK));
}
#define LRUCHECK lrucheck()

static void lrucheck(void)
{
	yielded_t *list;

	/* list is empty and ok if both ends point to null. */
	if(!lru_youngest && !lru_oldest)
		return;

	/* if not, both should point to something. */
	SLABSANE(lru_youngest);
	SLABSANE(lru_oldest);

	assert(!lru_youngest->younger);
	assert(!lru_oldest->older);

	for(list = lru_youngest; list; list = list->older) {
		if(list->younger) {
			SLABSANE(list->younger);
			assert(list->younger->older == list);
		} else	assert(list == lru_youngest);
		if(list->older) {
			SLABSANE(list->older);
			assert(list->older->younger == list);
		} else	assert(list == lru_oldest);
	}
}
void blockstats(void)
{
	yielded_t *list;
	int blocks = 0;
	phys_bytes mem = 0;
	clock_t ticks;
	int s;

	s = getuptime(&ticks);
	assert(s == OK);

	for(list = lru_youngest; list; list = list->older) {
		mem += VM_PAGE_SIZE * list->pages;
		blocks++;
	}

	if(blocks > 0)
		printf("%d blocks, %lukB; ", blocks, mem/1024);
}
#else
#define LRUCHECK
#endif
/*=========================================================================*
 *				map_ph_writept				   *
 *=========================================================================*/
static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr)
{
	int rw;
	struct phys_block *pb = pr->ph;

	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(pr->offset % VM_PAGE_SIZE));
	assert(pb->refcount > 0);

	if(pr_writable(vr, pr))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	if(pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
			pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw,
			WMF_OVERWRITE) != OK) {
		printf("VM: map_writept: pt_writemap failed\n");
		return ENOMEM;
	}

	USE(pr, pr->written = 1;);

	return OK;
}
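/*
 * Note (added): this is the writing twin of map_sanitycheck_pt() above.
 * The same page-table entry is composed in both places; here it is
 * installed with WMF_OVERWRITE, while the sanity check passes WMF_VERIFY
 * so that pt_writemap() compares the page table against the expected
 * mapping instead of changing it.
 */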
#define SLOT_FAIL ((vir_bytes) -1)

/*===========================================================================*
 *				region_find_slot_range			     *
 *===========================================================================*/
static vir_bytes region_find_slot_range(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	struct vir_region *lastregion;
	vir_bytes startv = 0;
	int foundflag = 0;
	region_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Length must be reasonable. */
	assert(length > 0);

	/* Special case: allow caller to set maxv to 0 meaning 'I want
	 * it to be mapped in right here.'
	 */
	if(maxv == 0) {
		maxv = minv + length;

		/* Sanity check. */
		if(maxv <= minv) {
			printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
				minv, length);
			return SLOT_FAIL;
		}
	}

	/* Basic input sanity checks. */
	assert(!(length % VM_PAGE_SIZE));
	if(minv >= maxv) {
		printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
			minv, maxv, length);
	}
	assert(minv < maxv);

	if(minv + length > maxv)
		return SLOT_FAIL;

#define FREEVRANGE_TRY(rangestart, rangeend) {			\
	vir_bytes frstart = (rangestart), frend = (rangeend);	\
	frstart = MAX(frstart, minv);				\
	frend = MIN(frend, maxv);				\
	if(frend > frstart && (frend - frstart) >= length) {	\
		startv = frend-length;				\
		foundflag = 1;					\
	} }

#define FREEVRANGE(start, end) {					\
	assert(!foundflag);						\
	FREEVRANGE_TRY(((start)+VM_PAGE_SIZE), ((end)-VM_PAGE_SIZE));	\
	if(!foundflag) {						\
		FREEVRANGE_TRY((start), (end));				\
	} }

	/* find region after maxv. */
	region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_GREATER_EQUAL);
	lastregion = region_get_iter(&iter);

	if(!lastregion) {
		/* This is the free virtual address space after the last region. */
		region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_LESS);
		lastregion = region_get_iter(&iter);
		FREEVRANGE(lastregion ?
			lastregion->vaddr+lastregion->length : 0, VM_DATATOP);
	}

	if(!foundflag) {
		struct vir_region *vr;
		while((vr = region_get_iter(&iter)) && !foundflag) {
			struct vir_region *nextvr;
			region_decr_iter(&iter);
			nextvr = region_get_iter(&iter);
			FREEVRANGE(nextvr ? nextvr->vaddr+nextvr->length : 0,
				vr->vaddr);
		}
	}

	if(!foundflag)
		return SLOT_FAIL;

	/* However we got it, startv must be in the requested range. */
	assert(startv >= minv);
	assert(startv < maxv);
	assert(startv + length <= maxv);

	/* remember this position as a hint for next time. */
	vmp->vm_region_top = startv + length;

	return startv;
}
/*===========================================================================*
 *				region_find_slot			     *
 *===========================================================================*/
static vir_bytes region_find_slot(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	vir_bytes v, hint = vmp->vm_region_top;

	/* use the top of the last inserted region as a minv hint if
	 * possible. remember that a zero maxv is a special case.
	 */
	if(maxv && hint < maxv && hint >= minv) {
		v = region_find_slot_range(vmp, minv, hint, length);

		if(v != SLOT_FAIL)
			return v;
	}

	return region_find_slot_range(vmp, minv, maxv, length);
}
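/*
 * Sketch (added): region_find_slot() first restricts the search to the
 * range below the vm_region_top hint and falls back to the full
 * [minv, maxv) range only when that fails. A hypothetical caller asking
 * for one page anywhere in the process's data space (with vmp in scope):
 */
#if 0
	vir_bytes startv;

	startv = region_find_slot(vmp, VM_PAGE_SIZE, VM_DATATOP, VM_PAGE_SIZE);
	if(startv == SLOT_FAIL)
		return ENOMEM;	/* no free virtual range large enough */
#endif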
struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv, vir_bytes length,
	int flags, mem_type_t *memtype)
{
	static u32_t id;
	physr_avl *phavl;
	struct vir_region *newregion;

	if(!(SLABALLOC(newregion))) {
		printf("vm: region_new: could not allocate\n");
		return NULL;
	}

	/* Fill in node details. */
USE(newregion,
	memset(newregion, 0, sizeof(*newregion));
	newregion->vaddr = startv;
	newregion->length = length;
	newregion->flags = flags;
	newregion->memtype = memtype;
	newregion->remaps = 0;
	newregion->id = id++;
	newregion->lower = newregion->higher = NULL;
	newregion->parent = vmp;);

	SLABALLOC(phavl);
	if(!phavl) {
		printf("VM: region_new: allocating phys avl failed\n");
		SLABFREE(newregion);
		return NULL;
	}
	USE(newregion, newregion->phys = phavl;);
	physr_init(newregion->phys);

	return newregion;
}
/*===========================================================================*
 *				map_page_region				     *
 *===========================================================================*/
struct vir_region *map_page_region(vmp, minv, maxv, length,
	flags, mapflags, memtype)
struct vmproc *vmp;
vir_bytes minv;
vir_bytes maxv;
vir_bytes length;
u32_t flags;
int mapflags;
mem_type_t *memtype;
{
	struct vir_region *newregion;
	vir_bytes startv;

	assert(!(length % VM_PAGE_SIZE));

	SANITYCHECK(SCL_FUNCTIONS);

	startv = region_find_slot(vmp, minv, maxv, length);
	if (startv == SLOT_FAIL)
		return NULL;

	/* Now we want a new region. */
	if(!(newregion = region_new(vmp, startv, length, flags, memtype))) {
		printf("VM: map_page_region: allocating region failed\n");
		return NULL;
	}

	/* If a new event is specified, invoke it. */
	if(newregion->memtype->ev_new) {
		if(newregion->memtype->ev_new(newregion) != OK) {
			/* ev_new will have freed and removed the region */
			return NULL;
		}
	}

	if(mapflags & MF_PREALLOC) {
		if(map_handle_memory(vmp, newregion, 0, length, 1) != OK) {
			printf("VM: map_page_region: prealloc failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	/* Pre-allocations should be uninitialized, but after that it's a
	 * different story.
	 */
	USE(newregion, newregion->flags &= ~VR_UNINITIALIZED;);

	/* Link it. */
	region_insert(&vmp->vm_regions_avl, newregion);

	assert(startv == newregion->vaddr);
	{
		struct vir_region *nextvr;
		if((nextvr = getnextvr(newregion))) {
			assert(newregion->vaddr < nextvr->vaddr);
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return newregion;
}
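/*
 * Sketch (added): creating a one-page anonymous, writable, prefaulted
 * mapping with map_page_region(). The memory type object mem_type_anon is
 * an assumption here (the anonymous memtype used elsewhere in this
 * server); the surrounding handler and its vmp are hypothetical.
 */
#if 0
	struct vir_region *vr;

	if(!(vr = map_page_region(vmp, VM_PAGE_SIZE, VM_DATATOP, VM_PAGE_SIZE,
		VR_ANON | VR_WRITABLE, MF_PREALLOC, &mem_type_anon)))
		return ENOMEM;
#endif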
/*===========================================================================*
 *				map_subfree				     *
 *===========================================================================*/
static int map_subfree(struct vir_region *region,
	vir_bytes start, vir_bytes len)
{
	struct phys_region *pr;
	physr_iter iter;
	vir_bytes end = start+len;
	int full = 0;

	SLABSANE(region->phys);
	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		struct phys_region *others;
		struct phys_block *pb;

		pb = pr->ph;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
		}
		physr_incr_iter(&iter);
	}

	if(start == 0 && len == region->length)
		full = 1;

	physr_init_iter(&iter);
	physr_start_iter(region->phys, &iter, start, AVL_GREATER_EQUAL);
	while((pr = physr_get_iter(&iter))) {
		physr_incr_iter(&iter);
		if(pr->offset >= end)
			break;
		pb_unreferenced(region, pr, !full);
		if(!full) {
			physr_start_iter(region->phys, &iter,
				pr->offset, AVL_GREATER_EQUAL);
		}
		SLABFREE(pr);
	}

	if(full)
		physr_init(region->phys);

	return OK;
}
/*===========================================================================*
 *				map_free				     *
 *===========================================================================*/
int map_free(struct vir_region *region)
{
	int r;

	if((r=map_subfree(region, 0, region->length)) != OK) {
		printf("%d\n", __LINE__);
		return r;
	}

	if(region->memtype->ev_delete)
		region->memtype->ev_delete(region);

	USE(region,
		SLABFREE(region->phys););
	SLABFREE(region);

	return OK;
}
/*===========================================================================*
 *				yielded_block_cmp			     *
 *===========================================================================*/
int yielded_block_cmp(struct block_id *id1, struct block_id *id2)
{
	if(id1->owner < id2->owner)
		return -1;
	if(id1->owner > id2->owner)
		return 1;
	return cmp64(id1->id, id2->id);
}
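/*
 * Note (added): this comparator defines the total order used by the
 * yielded-block AVL trees: blocks sort first by owner endpoint, then by
 * 64-bit block id, so two keys are equal only when both fields match.
 * E.g. (owner 7, id 1) < (owner 7, id 2) < (owner 8, id 0); the values
 * are hypothetical.
 */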
/*===========================================================================*
 *				free_yielded_proc			     *
 *===========================================================================*/
static vir_bytes free_yielded_proc(struct vmproc *vmp)
{
	vir_bytes total = 0;
	int h;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Free associated regions. */
	for(h = 0; h < YIELD_HASHSIZE && vmp->vm_yielded > 0; h++) {
		yielded_t *yb;
		yielded_iter iter;
		yielded_avl *avl = &vm_yielded_blocks[h];
		yielded_start_iter_least(avl, &iter);
		while((yb = yielded_get_iter(&iter))) {
			yielded_t *next_yb;
			yielded_incr_iter(&iter);
			if(yb->id.owner != vmp->vm_endpoint)
				continue;
			next_yb = yielded_get_iter(&iter);
			total += freeyieldednode(yb, 1);
			/* the above removal invalidated our iter; restart it
			 * for the node we want to start at.
			 */
			if(!next_yb) break;
			yielded_start_iter(avl, &iter, next_yb->id, AVL_EQUAL);
			assert(yielded_get_iter(&iter) == next_yb);
		}
	}

	return total;
}
static phys_bytes freeyieldednode(yielded_t *node, int freemem)
{
	yielded_t *older, *younger, *removed;
	yielded_avl *avl;
	phys_bytes bytes;
	int p;

	LRUCHECK;

	/* Update LRU. */

	younger = node->younger;
	older = node->older;

	if(younger) {
		assert(younger->older == node);
		USE(younger, younger->older = node->older;);
	} else {
		assert(node == lru_youngest);
		lru_youngest = node->older;
	}

	if(older) {
		assert(older->younger == node);
		USE(older, older->younger = node->younger;);
	} else {
		assert(node == lru_oldest);
		lru_oldest = node->younger;
	}

	LRUCHECK;

	/* Update AVL. */

	if(vm_isokendpt(node->id.owner, &p) != OK)
		panic("out of date owner of yielded block %d", node->id.owner);
	avl = get_yielded_avl(node->id);
	removed = yielded_remove(avl, node->id);
	assert(removed == node);
	assert(vmproc[p].vm_yielded > 0);
	vmproc[p].vm_yielded--;

	/* Free associated memory if requested. */
	if(freemem)
		free_mem(ABS2CLICK(node->physaddr), node->pages);

	/* Remember the freed size before releasing the node itself. */
	bytes = VM_PAGE_SIZE * node->pages;
	SLABFREE(node);

	return bytes;
}
/*========================================================================*
 *				free_yielded				  *
 *========================================================================*/
vir_bytes free_yielded(vir_bytes max_bytes)
{
/* PRIVATE yielded_t *lru_youngest = NULL, *lru_oldest = NULL; */
	vir_bytes freed = 0;

	while(freed < max_bytes && lru_oldest) {
		SLABSANE(lru_oldest);
		freed += freeyieldednode(lru_oldest, 1);
	}

	return freed;
}
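/*
 * Sketch (added): free_yielded() evicts from the old end of the LRU list,
 * so a caller that needs memory back asks for a byte amount and the least
 * recently yielded blocks are freed first. A hypothetical low-memory path
 * might do:
 */
#if 0
	if(low_on_memory)			/* hypothetical condition */
		free_yielded(32 * VM_PAGE_SIZE);	/* reclaim up to 32 pages */
#endif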
/*========================================================================*
 *				map_free_proc				  *
 *========================================================================*/
int map_free_proc(vmp)
struct vmproc *vmp;
{
	struct vir_region *r;

	while((r = region_search_root(&vmp->vm_regions_avl))) {
		SANITYCHECK(SCL_DETAIL);
		region_remove(&vmp->vm_regions_avl, r->vaddr); /* For sanity checks. */
		map_free(r);
		SANITYCHECK(SCL_DETAIL);
	}

	region_init(&vmp->vm_regions_avl);

	/* Free associated yielded blocks. */
	free_yielded_proc(vmp);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*===========================================================================*
 *				map_lookup				     *
 *===========================================================================*/
struct vir_region *map_lookup(vmp, offset, physr)
struct vmproc *vmp;
vir_bytes offset;
struct phys_region **physr;
{
	struct vir_region *r;

	SANITYCHECK(SCL_FUNCTIONS);

	if(!region_search_root(&vmp->vm_regions_avl))
		panic("process has no regions: %d", vmp->vm_endpoint);

	if((r = region_search(&vmp->vm_regions_avl, offset, AVL_LESS_EQUAL))) {
		vir_bytes ph;
		if(offset >= r->vaddr && offset < r->vaddr + r->length) {
			ph = offset - r->vaddr;
			if(physr) {
				*physr = physr_search(r->phys, ph, AVL_EQUAL);
				if(*physr) assert((*physr)->offset == ph);
			}
			return r;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return NULL;
}
u32_t vrallocflags(u32_t flags)
{
	u32_t allocflags = 0;

	if(flags & VR_PHYS64K)
		allocflags |= PAF_ALIGN64K;
	if(flags & VR_LOWER16MB)
		allocflags |= PAF_LOWER16MB;
	if(flags & VR_LOWER1MB)
		allocflags |= PAF_LOWER1MB;
	if(flags & VR_CONTIG)
		allocflags |= PAF_CONTIG;
	if(!(flags & VR_UNINITIALIZED))
		allocflags |= PAF_CLEAR;

	return allocflags;
}
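/*
 * Example (added): translating region flags to allocation flags. A region
 * created with VR_LOWER16MB | VR_CONTIG and without VR_UNINITIALIZED maps
 * to PAF_LOWER16MB | PAF_CONTIG | PAF_CLEAR: physically contiguous memory
 * below 16MB, cleared before use (e.g. for an ISA DMA buffer).
 */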
/*===========================================================================*
 *				map_clone_ph_block			     *
 *===========================================================================*/
struct phys_region *map_clone_ph_block(vmp, region, ph, iter)
struct vmproc *vmp;
struct vir_region *region;
struct phys_region *ph;
physr_iter *iter;
{
	vir_bytes offset;
	u32_t allocflags;
	phys_bytes physaddr;
	struct phys_region *newpr;
	int region_has_single_block;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Warning: this function will free the passed
	 * phys_region *ph and replace it (in the same offset)
	 * with another! So both the pointer to it
	 * and any iterators over the phys_regions in the vir_region
	 * will be invalid on successful return. (Iterators over
	 * the vir_region could be invalid on unsuccessful return too.)
	 */

	/* This is only to be done if there is more than one copy. */
	assert(ph->ph->refcount > 1);

	/* This function takes a physical block, copies its contents
	 * into newly allocated memory, and replaces the single physical
	 * block by one or more physical blocks with refcount 1 with the
	 * same contents as the original. In other words, a fragmentable
	 * version of map_copy_ph_block().
	 */

	/* Remember where and how much. */
	offset = ph->offset;
	physaddr = ph->ph->phys;

	/* Now unlink the original physical block so we can replace
	 * it with new ones.
	 */
	assert(ph->ph->refcount > 1);
	pb_unreferenced(region, ph, 1);
	SLABFREE(ph);

	SANITYCHECK(SCL_DETAIL);

	/* Put new free memory in. */
	allocflags = vrallocflags(region->flags | VR_UNINITIALIZED);
	region_has_single_block = (offset == 0 && region->length == VM_PAGE_SIZE);
	assert(region_has_single_block || !(allocflags & PAF_CONTIG));
	assert(!(allocflags & PAF_CLEAR));

	if(map_pf(vmp, region, offset, 1) != OK) {
		/* XXX original range now gone. */
		printf("VM: map_clone_ph_block: map_pf failed.\n");
		return NULL;
	}

	/* Copy the block to the new memory.
	 * Can only fail if map_new_physblock didn't do what we asked.
	 */
	if(copy_abs2region(physaddr, region, offset, VM_PAGE_SIZE) != OK)
		panic("copy_abs2region failed, no good reason for that");

	newpr = physr_search(region->phys, offset, AVL_EQUAL);
	assert(newpr);
	assert(newpr->offset == offset);

	if(iter) {
		physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
		assert(physr_get_iter(iter) == newpr);
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return newpr;
}
/*===========================================================================*
 *				map_pf					     *
 *===========================================================================*/
int map_pf(vmp, region, offset, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset;
int write;
{
	struct phys_region *ph;
	int r = OK;

	offset -= offset % VM_PAGE_SIZE;

	assert(offset < region->length);

	assert(!(region->vaddr % VM_PAGE_SIZE));
	assert(!(write && !(region->flags & VR_WRITABLE)));

	SANITYCHECK(SCL_FUNCTIONS);

	if(!(ph = physr_search(region->phys, offset, AVL_EQUAL))) {
		struct phys_block *pb;

		/* New block. */

		if(!(pb = pb_new(MAP_NONE))) {
			printf("map_pf: pb_new failed\n");
			return ENOMEM;
		}

		if(!(ph = pb_reference(pb, offset, region))) {
			printf("map_pf: pb_reference failed\n");
			pb_free(pb);
			return ENOMEM;
		}
	}

	assert(ph);
	assert(ph->ph);

	/* If we're writing and the block is already
	 * writable, nothing to do.
	 */

	assert(region->memtype->writable);

	if(!write || !region->memtype->writable(ph)) {
		assert(region->memtype->ev_pagefault);

		if((r = region->memtype->ev_pagefault(vmp,
			region, ph, write)) == SUSPEND) {
			panic("map_pf: memtype->ev_pagefault returned SUSPEND\n");
		}

		if(r != OK) {
			printf("map_pf: memtype->ev_pagefault failed\n");
			if(ph)
				pb_unreferenced(region, ph, 1);
			return r;
		}

		assert(ph);
		assert(ph->ph);
	}

	assert(ph->ph);

	if((r = map_ph_writept(vmp, region, ph)) != OK) {
		printf("map_pf: writept failed\n");
		return r;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset,
		VM_PAGE_SIZE, write)) {
		panic("map_pf: pt_checkrange failed: %d", r);
	}

	return r;
}
int map_handle_memory(vmp, region, start_offset, length, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes start_offset;
vir_bytes length;
int write;
{
	vir_bytes offset, lim;
	int r;

	assert(length > 0);
	lim = start_offset + length;
	assert(lim > start_offset);

	for(offset = start_offset; offset < lim; offset += VM_PAGE_SIZE)
		if((r = map_pf(vmp, region, offset, write)) != OK)
			return r;

	return OK;
}
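/*
 * Sketch (added): map_handle_memory() walks the range one VM_PAGE_SIZE at
 * a time and faults every page in; this is what MF_PREALLOC mappings use
 * to get all their memory up front. Prefaulting an entire region for
 * writing (vmp and vr assumed in scope) looks like:
 */
#if 0
	if(map_handle_memory(vmp, vr, 0, vr->length, 1) != OK)
		return ENOMEM;	/* some page could not be allocated */
#endif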
/*===========================================================================*
 *				map_pin_memory				     *
 *===========================================================================*/
int map_pin_memory(struct vmproc *vmp)
{
	struct vir_region *vr;
	region_iter iter;
	int r;

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	/* Scan all memory regions. */
	while((vr = region_get_iter(&iter))) {
		/* Make sure region is mapped to physical memory and writable.*/
		r = map_handle_memory(vmp, vr, 0, vr->length, 1);
		if(r != OK)
			panic("map_pin_memory: map_handle_memory failed: %d", r);
		region_incr_iter(&iter);
	}

	return OK;
}
static int count_phys_regions(struct vir_region *vr)
{
	int n = 0;
	struct phys_region *ph;
	physr_iter iter;

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		n++;
		physr_incr_iter(&iter);
	}

	return n;
}
/*===========================================================================*
 *				map_copy_region				     *
 *===========================================================================*/
static struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region *vr)
{
	/* map_copy_region creates a complete copy of the vir_region
	 * data structure, linking in the same phys_blocks directly,
	 * but all in limbo, i.e., the caller has to link the vir_region
	 * to a process. Therefore it doesn't increase the refcount in
	 * the phys_block; the caller has to do this once it's linked.
	 * The reason for this is to keep the sanity checks working
	 * within this function.
	 */
	struct vir_region *newvr;
	struct phys_region *ph;
	physr_iter iter;
	int r;
	int cr;

	cr = count_phys_regions(vr);

	if(!(newvr = region_new(vr->parent, vr->vaddr, vr->length, vr->flags, vr->memtype)))
		return NULL;

	if(vr->memtype->ev_copy && (r=vr->memtype->ev_copy(vr, newvr)) != OK) {
		map_free(newvr);
		printf("VM: memtype-specific copy failed (%d)\n", r);
		return NULL;
	}

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		struct phys_region *newph = pb_reference(ph->ph, ph->offset, newvr);

		if(!newph) { map_free(newvr); return NULL; }

		USE(newph, newph->written = 0;);
		assert(count_phys_regions(vr) == cr);
		physr_incr_iter(&iter);
	}

	assert(count_phys_regions(vr) == count_phys_regions(newvr));

	return newvr;
}
/*===========================================================================*
 *				copy_abs2region				     *
 *===========================================================================*/
int copy_abs2region(phys_bytes abs, struct vir_region *destregion,
	phys_bytes offset, phys_bytes len)
{
	assert(destregion->phys);
	while(len > 0) {
		phys_bytes sublen, suboffset;
		struct phys_region *ph;

		assert(destregion->phys);
		if(!(ph = physr_search(destregion->phys, offset, AVL_LESS_EQUAL))) {
			printf("VM: copy_abs2region: no phys region found (1).\n");
			return EFAULT;
		}
		assert(ph->offset <= offset);
		if(ph->offset+VM_PAGE_SIZE <= offset) {
			printf("VM: copy_abs2region: no phys region found (2).\n");
			return EFAULT;
		}
		suboffset = offset - ph->offset;
		assert(suboffset < VM_PAGE_SIZE);
		sublen = len;
		if(sublen > VM_PAGE_SIZE - suboffset)
			sublen = VM_PAGE_SIZE - suboffset;
		assert(suboffset + sublen <= VM_PAGE_SIZE);
		if(ph->ph->refcount != 1) {
			printf("VM: copy_abs2region: refcount not 1.\n");
			return EFAULT;
		}

		if(sys_abscopy(abs, ph->ph->phys + suboffset, sublen) != OK) {
			printf("VM: copy_abs2region: abscopy failed.\n");
			return EFAULT;
		}
		abs += sublen;
		offset += sublen;
		len -= sublen;
	}

	return OK;
}
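/*
 * Worked example (added, assuming 4096-byte pages): the loop above copies
 * page fragment by page fragment. Copying len 6000 starting at offset 4090
 * takes three iterations: first suboffset 4090 gives sublen 6 (to the end
 * of that page), then a full page of 4096, then the remaining 1898 bytes;
 * 6 + 4096 + 1898 == 6000.
 */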
/*=========================================================================*
 *				map_writept				   *
 *=========================================================================*/
int map_writept(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *ph;
	region_iter v_iter;
	int r;

	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		physr_iter ph_iter;
		physr_start_iter_least(vr->phys, &ph_iter);

		while((ph = physr_get_iter(&ph_iter))) {
			physr_incr_iter(&ph_iter);

			if((r=map_ph_writept(vmp, vr, ph)) != OK) {
				printf("VM: map_writept: failed\n");
				return r;
			}
		}
		region_incr_iter(&v_iter);
	}

	return OK;
}
/*========================================================================*
 *				map_proc_copy				  *
 *========================================================================*/
int map_proc_copy(dst, src)
struct vmproc *dst;
struct vmproc *src;
{
/* Copy all the memory regions from the src process to the dst process. */
	region_init(&dst->vm_regions_avl);

	return map_proc_copy_from(dst, src, NULL);
}
/*========================================================================*
 *				map_proc_copy_from			  *
 *========================================================================*/
int map_proc_copy_from(dst, src, start_src_vr)
struct vmproc *dst;
struct vmproc *src;
struct vir_region *start_src_vr;
{
	struct vir_region *vr;
	region_iter v_iter;

	if(!start_src_vr)
		start_src_vr = region_search_least(&src->vm_regions_avl);

	assert(start_src_vr);
	assert(start_src_vr->parent == src);
	region_start_iter(&src->vm_regions_avl, &v_iter,
		start_src_vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter) == start_src_vr);

	/* Copy source regions after the destination's last region (if any). */

	SANITYCHECK(SCL_FUNCTIONS);

	while((vr = region_get_iter(&v_iter))) {
		physr_iter iter_orig, iter_new;
		struct vir_region *newvr;
		struct phys_region *orig_ph, *new_ph;
		if(!(newvr = map_copy_region(dst, vr))) {
			map_free_proc(dst);
			return ENOMEM;
		}
		USE(newvr, newvr->parent = dst;);
		region_insert(&dst->vm_regions_avl, newvr);
		physr_start_iter_least(vr->phys, &iter_orig);
		physr_start_iter_least(newvr->phys, &iter_new);
		while((orig_ph = physr_get_iter(&iter_orig))) {
			struct phys_block *pb;
			new_ph = physr_get_iter(&iter_new);
			/* Check two physregions both are nonnull,
			 * are different, and match physblocks.
			 */
			assert(orig_ph);
			assert(new_ph);
			assert(orig_ph != new_ph);
			pb = orig_ph->ph;
			assert(pb == new_ph->ph);

			/* Get next new physregion */
			physr_incr_iter(&iter_orig);
			physr_incr_iter(&iter_new);
		}
		assert(!physr_get_iter(&iter_new));
		region_incr_iter(&v_iter);
	}

	map_writept(src);
	map_writept(dst);

	SANITYCHECK(SCL_FUNCTIONS);
	return OK;
}
int map_region_extend_upto_v(struct vmproc *vmp, vir_bytes v)
{
	vir_bytes offset = v;
	struct vir_region *vr, *nextvr;

	offset = roundup(offset, VM_PAGE_SIZE);

	if(!(vr = region_search(&vmp->vm_regions_avl, offset, AVL_LESS))) {
		printf("VM: nothing to extend\n");
		return ENOMEM;
	}

	assert(vr->vaddr <= offset);
	if((nextvr = getnextvr(vr))) {
		assert(offset <= nextvr->vaddr);
	}

	if(nextvr && nextvr->vaddr < offset) {
		printf("VM: can't grow into next region\n");
		return ENOMEM;
	}

	if(!vr->memtype->ev_resize) {
		printf("VM: can't resize this type of memory\n");
		return ENOMEM;
	}

	return vr->memtype->ev_resize(vmp, vr, offset - vr->vaddr);
}
/*========================================================================*
 *				map_unmap_region			  *
 *========================================================================*/
int map_unmap_region(struct vmproc *vmp, struct vir_region *r,
	vir_bytes offset, vir_bytes len)
{
/* Shrink the region by 'len' bytes, from the start. Unreference
 * memory it used to reference if any.
 */
	vir_bytes regionstart;

	SANITYCHECK(SCL_FUNCTIONS);

	if(offset+len > r->length || (len % VM_PAGE_SIZE)) {
		printf("VM: bogus length 0x%lx\n", len);
		return EINVAL;
	}

	regionstart = r->vaddr + offset;

	/* unreference its memory */
	map_subfree(r, offset, len);

	/* if unmap was at start/end of this region, it actually shrinks */
	if(offset == 0) {
		struct phys_region *pr;
		physr_iter iter;

		region_remove(&vmp->vm_regions_avl, r->vaddr);

		USE(r,
		r->vaddr += len;
		r->length -= len;);

		region_insert(&vmp->vm_regions_avl, r);

		/* vaddr has increased; to make all the phys_regions
		 * point to the same addresses, make them shrink by the
		 * same amount.
		 */
		physr_init_iter(&iter);
		physr_start_iter(r->phys, &iter, offset, AVL_GREATER_EQUAL);

		while((pr = physr_get_iter(&iter))) {
			assert(pr->offset >= offset);
			USE(pr, pr->offset -= len;);
			physr_incr_iter(&iter);
		}
	} else if(offset + len == r->length) {
		assert(len <= r->length);
		USE(r, r->length -= len;);
	}

	if(r->length == 0) {
		/* Whole region disappears. Unlink and free it. */
		region_remove(&vmp->vm_regions_avl, r->vaddr);
		map_free(r);
	}

	SANITYCHECK(SCL_DETAIL);

	/* Remove the region from the page table. */
	if(pt_writemap(vmp, &vmp->vm_pt, regionstart,
		MAP_NONE, len, 0, WMF_OVERWRITE) != OK) {
		printf("VM: map_unmap_region: pt_writemap failed\n");
		return ENOMEM;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
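/*
 * Worked example (added): unmapping the first two pages of a region
 * (offset 0, len 2*VM_PAGE_SIZE) moves vaddr up by len, shortens length by
 * len, and lowers every remaining phys_region's offset by the same len, so
 * each surviving page keeps its virtual address:
 * (vaddr + len) + (offset - len) == vaddr + offset.
 */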
/*========================================================================*
 *				map_get_phys				  *
 *========================================================================*/
int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
{
	struct vir_region *vr;

	if (!(vr = map_lookup(vmp, addr, NULL)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!vr->memtype->regionid)
		return EINVAL;

	if(r)
		*r = vr->memtype->regionid(vr);

	return OK;
}
/*========================================================================*
 *				map_get_ref				  *
 *========================================================================*/
int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
{
	struct vir_region *vr;

	if (!(vr = map_lookup(vmp, addr, NULL)) ||
		(vr->vaddr != addr) || !vr->memtype->refcount)
		return EINVAL;

	if (cnt)
		*cnt = vr->memtype->refcount(vr);

	return OK;
}
/*========================================================================*
 *				get_stats_info				  *
 *========================================================================*/
void get_stats_info(struct vm_stats_info *vsi)
{
	yielded_t *yb;

	vsi->vsi_cached = 0L;

	for(yb = lru_youngest; yb; yb = yb->older)
		vsi->vsi_cached++;
}
void get_usage_info_kernel(struct vm_usage_info *vui)
{
	memset(vui, 0, sizeof(*vui));
	vui->vui_total = kernel_boot_info.kernel_allocated_bytes;
}
static void get_usage_info_vm(struct vm_usage_info *vui)
{
	memset(vui, 0, sizeof(*vui));
	vui->vui_total = kernel_boot_info.vm_allocated_bytes +
		get_vm_self_pages() * VM_PAGE_SIZE;
}
/*========================================================================*
 *				get_usage_info				  *
 *========================================================================*/
void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
{
	struct vir_region *vr;
	physr_iter iter;
	struct phys_region *ph;
	region_iter v_iter;

	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	memset(vui, 0, sizeof(*vui));

	if(vmp->vm_endpoint == VM_PROC_NR) {
		get_usage_info_vm(vui);
		return;
	}

	if(vmp->vm_endpoint < 0) {
		get_usage_info_kernel(vui);
		return;
	}

	while((vr = region_get_iter(&v_iter))) {
		physr_start_iter_least(vr->phys, &iter);
		while((ph = physr_get_iter(&iter))) {
			/* All present pages are counted towards the total. */
			vui->vui_total += VM_PAGE_SIZE;

			if (ph->ph->refcount > 1) {
				/* Any page with a refcount > 1 is common. */
				vui->vui_common += VM_PAGE_SIZE;

				/* Any common, non-COW page is shared. */
				if (vr->flags & VR_SHARED)
					vui->vui_shared += VM_PAGE_SIZE;
			}
			physr_incr_iter(&iter);
		}
		region_incr_iter(&v_iter);
	}
}
/*===========================================================================*
 *				get_region_info				     *
 *===========================================================================*/
int get_region_info(struct vmproc *vmp, struct vm_region_info *vri,
	int max, vir_bytes *nextp)
{
	struct vir_region *vr;
	vir_bytes next;
	region_iter v_iter;
	int count;

	next = *nextp;

	region_start_iter(&vmp->vm_regions_avl, &v_iter, next, AVL_GREATER_EQUAL);
	if(!(vr = region_get_iter(&v_iter))) return 0;

	for(count = 0; (vr = region_get_iter(&v_iter)) && count < max; count++, vri++) {
		struct phys_region *ph1, *ph2;

		/* Report part of the region that's actually in use. */

		/* Get first and last phys_regions, if any */
		ph1 = physr_search_least(vr->phys);
		ph2 = physr_search_greatest(vr->phys);
		if(!ph1 || !ph2) { assert(!ph1 && !ph2); continue; }

		/* Report start+length of region starting from lowest use. */
		vri->vri_addr = vr->vaddr + ph1->offset;
		vri->vri_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
		vri->vri_length = ph2->offset + VM_PAGE_SIZE - ph1->offset;

		/* "AND" the provided protection with per-page protection. */
		if (!(vr->flags & VR_WRITABLE))
			vri->vri_prot &= ~PROT_WRITE;

		next = vr->vaddr + vr->length;
		region_incr_iter(&v_iter);
	}

	*nextp = next;
	return count;
}
/*========================================================================*
 *				printregionstats			  *
 *========================================================================*/
void printregionstats(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *pr;
	physr_iter iter;
	region_iter v_iter;
	vir_bytes used = 0, weighted = 0;

	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		region_incr_iter(&v_iter);
		if(vr->flags & VR_DIRECT)
			continue;
		physr_start_iter_least(vr->phys, &iter);
		while((pr = physr_get_iter(&iter))) {
			physr_incr_iter(&iter);
			used += VM_PAGE_SIZE;
			weighted += VM_PAGE_SIZE / pr->ph->refcount;
		}
	}

	printf("%6lukB %6lukB\n", used/1024, weighted/1024);
}
/*===========================================================================*
 *				get_clean_phys_region			     *
 *===========================================================================*/
static struct phys_region *
get_clean_phys_region(struct vmproc *vmp, vir_bytes vaddr, struct vir_region **ret_region)
{
	struct vir_region *region;
	vir_bytes mapaddr = vaddr;
	struct phys_region *ph;

	if(!(region = map_lookup(vmp, mapaddr, &ph)) || !ph) {
		printf("VM: get_clean_phys_region: 0x%lx not found\n", vaddr);
		return NULL;
	}

	assert(mapaddr >= region->vaddr);
	assert(mapaddr < region->vaddr + region->length);

	/* If it's mapped more than once, make a copy. */
	assert(ph->ph->refcount > 0);
	if(ph->ph->refcount > 1) {
		if(!(ph = map_clone_ph_block(vmp, region, ph, NULL))) {
			printf("VM: get_clean_phys_region: ph copy failed\n");
			return NULL;
		}
	}

	assert(ph->ph->refcount == 1);

	*ret_region = region;

	return ph;
}
static int getblock(struct vmproc *vmp, u64_t id, vir_bytes vaddr, int pages)
{
	yielded_t *yb;
	struct phys_region *ph;
	struct vir_region *region;
	yielded_avl *avl;
	block_id_t blockid;
	phys_bytes phaddr;
	int p;

	/* Try to get the yielded block */
	blockid.owner = vmp->vm_endpoint;
	blockid.id = id;
	avl = get_yielded_avl(blockid);
	if(!(yb = yielded_search(avl, blockid, AVL_EQUAL))) {
		return ESRCH;
	}

	if(yb->pages != pages) {
		printf("VM: getblock: length mismatch (%d != %d)\n",
			pages, yb->pages);
		return EFAULT;
	}

	phaddr = yb->physaddr;

	for(p = 0; p < pages; p++) {
		/* Get the intended phys region, make sure refcount is 1. */
		if(!(ph = get_clean_phys_region(vmp, vaddr, &region))) {
			printf("VM: getblock: not found for %d\n", vmp->vm_endpoint);
			return EINVAL;
		}

		assert(ph->ph->refcount == 1);

		/* Free the block that is currently there. */
		free_mem(ABS2CLICK(ph->ph->phys), 1);

		/* Set the phys block to new addr and update pagetable. */
		USE(ph->ph, ph->ph->phys = phaddr;);
		if(map_ph_writept(vmp, region, ph) != OK) {
			/* Presumably it was mapped, so there is no reason
			 * updating should fail.
			 */
			panic("do_get_block: couldn't write pt");
		}

		vaddr += VM_PAGE_SIZE;
		phaddr += VM_PAGE_SIZE;
	}

	/* Forget about the yielded block and free the struct. */
	freeyieldednode(yb, 0);

	return OK;
}
static int yieldblock(struct vmproc *vmp, u64_t id,
	vir_bytes vaddr, yielded_t **retyb, int pages)
{
	yielded_t *newyb;
	vir_bytes mem_clicks, v, p, new_phaddr;
	struct vir_region *region;
	struct phys_region *ph = NULL, *prev_ph = NULL, *first_ph = NULL;
	yielded_avl *avl;
	block_id_t blockid;

	/* Makes no sense if yielded block ID already exists, and
	 * is likely a serious bug in the caller.
	 */
	blockid.id = id;
	blockid.owner = vmp->vm_endpoint;
	avl = get_yielded_avl(blockid);
	if(yielded_search(avl, blockid, AVL_EQUAL)) {
		return EINVAL;
	}

	if((vaddr % VM_PAGE_SIZE) || pages < 1) return EFAULT;

	v = vaddr;
	for(p = 0; p < pages; p++) {
		if(!(region = map_lookup(vmp, v, &ph)) || !ph) {
			printf("VM: do_yield_block: not found for %d\n",
				vmp->vm_endpoint);
			return EINVAL;
		}
		if(!(region->flags & VR_ANON)) {
			printf("VM: yieldblock: non-anon 0x%lx\n", v);
			return EFAULT;
		}
		if(ph->ph->refcount != 1) {
			printf("VM: do_yield_block: mapped not once for %d\n",
				vmp->vm_endpoint);
			return EFAULT;
		}
		if(prev_ph) {
			if(ph->ph->phys != prev_ph->ph->phys + VM_PAGE_SIZE) {
				printf("VM: physically discontiguous yield\n");
				return EINVAL;
			}
		}
		prev_ph = ph;
		if(!first_ph) first_ph = ph;
		v += VM_PAGE_SIZE;
	}

	/* Make a new block to record the yielding in. */
	if(!SLABALLOC(newyb)) {
		return ENOMEM;
	}

	assert(!(ph->ph->phys % VM_PAGE_SIZE));

	if((mem_clicks = alloc_mem(pages, PAF_CLEAR)) == NO_MEM) {
		SLABFREE(newyb);
		return ENOMEM;
	}

	/* Update yielded block info. */
	USE(newyb,
		newyb->id = blockid;
		newyb->physaddr = first_ph->ph->phys;
		newyb->pages = pages;
		newyb->younger = NULL;);

	new_phaddr = CLICK2ABS(mem_clicks);

	/* Set new phys block to new addr and update pagetable. */
	v = vaddr;
	for(p = 0; p < pages; p++) {
		region = map_lookup(vmp, v, &ph);
		assert(region && ph);
		assert(ph->ph->refcount == 1);
		USE(ph->ph,
			ph->ph->phys = new_phaddr;);
		if(map_ph_writept(vmp, region, ph) != OK) {
			/* Presumably it was mapped, so there is no reason
			 * updating should fail.
			 */
			panic("yield_block: couldn't write pt");
		}
		v += VM_PAGE_SIZE;
		new_phaddr += VM_PAGE_SIZE;
	}

	/* Remember yielded block. */
	yielded_insert(avl, newyb);
	vmp->vm_yielded++;

	/* Add to LRU list too. It's the youngest block. */
	LRUCHECK;

	if(lru_youngest) {
		USE(lru_youngest,
			lru_youngest->younger = newyb;);
	} else {
		lru_oldest = newyb;
	}

	USE(newyb,
		newyb->older = lru_youngest;);

	lru_youngest = newyb;

	LRUCHECK;

	if(retyb)
		*retyb = newyb;

	return OK;
}
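/*
 * Sketch (added): the yield/get protocol implemented by the two helpers
 * above. A process yields a page-aligned anonymous range: its physical
 * pages are parked under an (owner, id) key and put at the young end of
 * the LRU list, while freshly allocated, cleared pages take their place.
 * Later, getblock() with the same id swaps the parked pages back in,
 * provided VM has not reclaimed them under memory pressure in the
 * meantime; callers must therefore treat ESRCH as "block is gone".
 */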
/*===========================================================================*
 *				do_forgetblocks				     *
 *===========================================================================*/
int do_forgetblocks(message *m)
{
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yield_block: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	free_yielded_proc(vmp);

	return OK;
}
/*===========================================================================*
 *				do_forgetblock				     *
 *===========================================================================*/
int do_forgetblock(message *m)
{
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;
	yielded_t *yb;
	u64_t id;
	block_id_t blockid;
	yielded_avl *avl;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yield_block: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	id = make64(m->VMFB_IDLO, m->VMFB_IDHI);

	blockid.id = id;
	blockid.owner = vmp->vm_endpoint;
	avl = get_yielded_avl(blockid);
	if((yb = yielded_search(avl, blockid, AVL_EQUAL))) {
		freeyieldednode(yb, 1);
	}

	return OK;
}
/*===========================================================================*
 *				do_yieldblockgetblock			     *
 *===========================================================================*/
int do_yieldblockgetblock(message *m)
{
	u64_t yieldid, getid;
	int n, pages;
	endpoint_t caller = m->m_source;
	struct vmproc *vmp;
	yielded_t *yb = NULL;
	int r = ESRCH;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yieldblockgetblock: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	pages = m->VMYBGB_LEN / VM_PAGE_SIZE;

	if((m->VMYBGB_LEN % VM_PAGE_SIZE) || pages < 1) {
		printf("vm: non-page-aligned or short block length\n");
		return EFAULT;
	}

	yieldid = make64(m->VMYBGB_YIELDIDLO, m->VMYBGB_YIELDIDHI);
	getid = make64(m->VMYBGB_GETIDLO, m->VMYBGB_GETIDHI);

	if(cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to yield. */
		yieldblock(vmp, yieldid, (vir_bytes) m->VMYBGB_VADDR, &yb,
			pages);
	}

	if(cmp64(getid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to get. */
		r = getblock(vmp, getid, (vir_bytes) m->VMYBGB_VADDR, pages);
	}

	return r;
}
void map_setparent(struct vmproc *vmp)
{
	region_iter iter;
	struct vir_region *vr;

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		USE(vr, vr->parent = vmp;);
		region_incr_iter(&iter);
	}
}
int physregions(struct vir_region *vr)
{
	int n = 0;
	physr_iter iter;

	physr_start_iter_least(vr->phys, &iter);
	while(physr_get_iter(&iter)) {
		n++;
		physr_incr_iter(&iter);
	}

	return n;
}