/* minix/servers/vm/region.c */
#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>
#include <minix/hash.h>
#include <machine/multiboot.h>

#include <sys/mman.h>

#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>
#include <sys/param.h>

#include "vm.h"
#include "proto.h"
#include "util.h"
#include "glo.h"
#include "region.h"
#include "sanitycheck.h"
#include "memlist.h"
#include "memtype.h"
#include "regionavl.h"
static struct vir_region *map_copy_region(struct vmproc *vmp, struct
	vir_region *vr);

void map_region_init(void)
{
}
static void map_printregion(struct vir_region *vr)
{
	unsigned int i;
	struct phys_region *ph;
	printf("map_printmap: map_name: %s\n", vr->def_memtype->name);
	printf("\t%lx (len 0x%lx, %lukB), %p, %s\n",
		vr->vaddr, vr->length, vr->length/1024,
		vr->def_memtype->name,
		(vr->flags & VR_WRITABLE) ? "writable" : "readonly");
	printf("\t\tphysblocks:\n");
	for(i = 0; i < vr->length/VM_PAGE_SIZE; i++) {
		if(!(ph = vr->physblocks[i])) continue;
		printf("\t\t@ %lx (refs %d): phys 0x%lx, %s\n",
			(vr->vaddr + ph->offset),
			ph->ph->refcount, ph->ph->phys,
			pt_writable(vr->parent, vr->vaddr + ph->offset) ? "W" : "R");
	}
}
struct phys_region *physblock_get(struct vir_region *region, vir_bytes offset)
{
	int i;
	struct phys_region *foundregion;
	assert(!(offset % VM_PAGE_SIZE));
	assert( /* offset >= 0 && */ offset < region->length);
	i = offset/VM_PAGE_SIZE;
	if((foundregion = region->physblocks[i]))
		assert(foundregion->offset == offset);
	return foundregion;
}
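
/*
 * Usage sketch: physblock_get() maps a page-aligned byte offset to its
 * slot in vr->physblocks. For some region vr, looking up its third page:
 *
 *	struct phys_region *pr;
 *	if((pr = physblock_get(vr, 2 * VM_PAGE_SIZE)))
 *		printf("backed by phys 0x%lx\n", pr->ph->phys);
 *
 * A NULL result means the page has no physical backing yet.
 */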

void physblock_set(struct vir_region *region, vir_bytes offset,
	struct phys_region *newphysr)
{
	int i;
	struct vmproc *proc;
	assert(!(offset % VM_PAGE_SIZE));
	assert( /* offset >= 0 && */ offset < region->length);
	i = offset/VM_PAGE_SIZE;
	proc = region->parent;
	assert(proc);
	if(newphysr) {
		assert(!region->physblocks[i]);
		assert(newphysr->offset == offset);
		proc->vm_total += VM_PAGE_SIZE;
		if (proc->vm_total > proc->vm_total_max)
			proc->vm_total_max = proc->vm_total;
	} else {
		assert(region->physblocks[i]);
		proc->vm_total -= VM_PAGE_SIZE;
	}
	region->physblocks[i] = newphysr;
}

/*===========================================================================*
 *				map_printmap				     *
 *===========================================================================*/
void map_printmap(struct vmproc *vmp)
{
	struct vir_region *vr;
	region_iter iter;

	printf("memory regions in process %d:\n", vmp->vm_endpoint);

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		map_printregion(vr);
		region_incr_iter(&iter);
	}
}

static struct vir_region *getnextvr(struct vir_region *vr)
{
	struct vir_region *nextvr;
	region_iter v_iter;
	SLABSANE(vr);
	region_start_iter(&vr->parent->vm_regions_avl, &v_iter, vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter));
	assert(region_get_iter(&v_iter) == vr);
	region_incr_iter(&v_iter);
	nextvr = region_get_iter(&v_iter);
	if(!nextvr) return NULL;
	SLABSANE(nextvr);
	assert(vr->parent == nextvr->parent);
	assert(vr->vaddr < nextvr->vaddr);
	assert(vr->vaddr + vr->length <= nextvr->vaddr);
	return nextvr;
}

static int pr_writable(struct vir_region *vr, struct phys_region *pr)
{
	assert(pr->memtype->writable);
	return ((vr->flags & VR_WRITABLE) && pr->memtype->writable(pr));
}

#if SANITYCHECKS

/*===========================================================================*
 *				map_sanitycheck_pt			     *
 *===========================================================================*/
static int map_sanitycheck_pt(struct vmproc *vmp,
	struct vir_region *vr, struct phys_region *pr)
{
	struct phys_block *pb = pr->ph;
	int rw;
	int r;

	if(pr_writable(vr, pr))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	r = pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
		pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw, WMF_VERIFY);

	if(r != OK) {
		printf("proc %d phys_region 0x%lx sanity check failed\n",
			vmp->vm_endpoint, pr->offset);
		map_printregion(vr);
	}

	return r;
}

/*===========================================================================*
 *				map_sanitycheck				     *
 *===========================================================================*/
void map_sanitycheck(const char *file, int line)
{
	struct vmproc *vmp;

/* Macro for looping over all physical blocks of all regions of
 * all processes.
 */
#define ALLREGIONS(regioncode, physcode)				\
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) {		\
		vir_bytes voffset;					\
		region_iter v_iter;					\
		struct vir_region *vr;					\
		if(!(vmp->vm_flags & VMF_INUSE))			\
			continue;					\
		region_start_iter_least(&vmp->vm_regions_avl, &v_iter);	\
		while((vr = region_get_iter(&v_iter))) {		\
			struct phys_region *pr;				\
			regioncode;					\
			for(voffset = 0; voffset < vr->length;		\
				voffset += VM_PAGE_SIZE) {		\
				if(!(pr = physblock_get(vr, voffset)))	\
					continue;			\
				physcode;				\
			}						\
			region_incr_iter(&v_iter);			\
		}							\
	}

#define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
	/* Basic pointers check. */
	ALLREGIONS(MYSLABSANE(vr),MYSLABSANE(pr); MYSLABSANE(pr->ph);MYSLABSANE(pr->parent));
	ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr->parent == vr););

	/* Do counting for consistency check. */
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
	ALLREGIONS(;,MYASSERT(pr->offset == voffset););
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
		if(pr->ph->seencount == 1) {
			if(pr->memtype->ev_sanitycheck)
				pr->memtype->ev_sanitycheck(pr, file, line);
		}
	);

	/* Do consistency check. */
	ALLREGIONS({ struct vir_region *nextvr = getnextvr(vr);
		if(nextvr) {
			MYASSERT(vr->vaddr < nextvr->vaddr);
			MYASSERT(vr->vaddr + vr->length <= nextvr->vaddr);
		}
		}
		MYASSERT(!(vr->vaddr % VM_PAGE_SIZE));,
		if(pr->ph->flags & PBF_INCACHE) pr->ph->seencount++;
		if(pr->ph->refcount != pr->ph->seencount) {
			map_printmap(vmp);
			printf("ph in vr %p: 0x%lx refcount %u "
				"but seencount %u\n",
				vr, pr->offset,
				pr->ph->refcount, pr->ph->seencount);
		}
		{
			int n_others = 0;
			struct phys_region *others;
			if(pr->ph->refcount > 0) {
				MYASSERT(pr->ph->firstregion);
				if(pr->ph->refcount == 1) {
					MYASSERT(pr->ph->firstregion == pr);
				}
			} else {
				MYASSERT(!pr->ph->firstregion);
			}
			for(others = pr->ph->firstregion; others;
				others = others->next_ph_list) {
				MYSLABSANE(others);
				MYASSERT(others->ph == pr->ph);
				n_others++;
			}
			if(pr->ph->flags & PBF_INCACHE) n_others++;
			MYASSERT(pr->ph->refcount == n_others);
		}
		MYASSERT(pr->ph->refcount == pr->ph->seencount);
		MYASSERT(!(pr->offset % VM_PAGE_SIZE)););
	ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp, vr, pr) == OK));
}

#endif

/*=========================================================================*
 *				map_ph_writept				   *
 *=========================================================================*/
int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr)
{
	int flags = PTF_PRESENT | PTF_USER;
	struct phys_block *pb = pr->ph;

	assert(vr);
	assert(pr);
	assert(pb);

	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(pr->offset % VM_PAGE_SIZE));
	assert(pb->refcount > 0);

	if(pr_writable(vr, pr))
		flags |= PTF_WRITE;
	else
		flags |= PTF_READ;

	if(vr->def_memtype->pt_flags)
		flags |= vr->def_memtype->pt_flags(vr);

	if(pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
			pb->phys, VM_PAGE_SIZE, flags,
#if SANITYCHECKS
			!pr->written ? 0 :
#endif
			WMF_OVERWRITE) != OK) {
		printf("VM: map_writept: pt_writemap failed\n");
		return ENOMEM;
	}

#if SANITYCHECKS
	USE(pr, pr->written = 1;);
#endif

	return OK;
}

#define SLOT_FAIL ((vir_bytes) -1)

/*===========================================================================*
 *				region_find_slot_range			     *
 *===========================================================================*/
static vir_bytes region_find_slot_range(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	struct vir_region *lastregion;
	vir_bytes startv = 0;
	int foundflag = 0;
	region_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Length must be reasonable. */
	assert(length > 0);

	/* Special case: allow caller to set maxv to 0 meaning 'I want
	 * it to be mapped in right here.'
	 */
	if(maxv == 0) {
		maxv = minv + length;

		/* Sanity check. */
		if(maxv <= minv) {
			printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
				minv, length);
			return SLOT_FAIL;
		}
	}

	/* Basic input sanity checks. */
	assert(!(length % VM_PAGE_SIZE));
	if(minv >= maxv) {
		printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
			minv, maxv, length);
	}

	assert(minv < maxv);

	if(minv + length > maxv)
		return SLOT_FAIL;

#define FREEVRANGE_TRY(rangestart, rangeend) {			\
	vir_bytes frstart = (rangestart), frend = (rangeend);	\
	frstart = MAX(frstart, minv);				\
	frend = MIN(frend, maxv);				\
	if(frend > frstart && (frend - frstart) >= length) {	\
		startv = frend-length;				\
		foundflag = 1;					\
	} }

#define FREEVRANGE(start, end) {					\
	assert(!foundflag);						\
	FREEVRANGE_TRY(((start)+VM_PAGE_SIZE), ((end)-VM_PAGE_SIZE));	\
	if(!foundflag) {						\
		FREEVRANGE_TRY((start), (end));				\
	} }

	/* find region after maxv. */
	region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_GREATER_EQUAL);
	lastregion = region_get_iter(&iter);

	if(!lastregion) {
		/* This is the free virtual address space after the last region. */
		region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_LESS);
		lastregion = region_get_iter(&iter);
		FREEVRANGE(lastregion ?
			lastregion->vaddr+lastregion->length : 0, VM_DATATOP);
	}

	if(!foundflag) {
		struct vir_region *vr;
		while((vr = region_get_iter(&iter)) && !foundflag) {
			struct vir_region *nextvr;
			region_decr_iter(&iter);
			nextvr = region_get_iter(&iter);
			FREEVRANGE(nextvr ? nextvr->vaddr+nextvr->length : 0,
				vr->vaddr);
		}
	}

	if(!foundflag) {
		return SLOT_FAIL;
	}

	/* However we got it, startv must be in the requested range. */
	assert(startv >= minv);
	assert(startv < maxv);
	assert(startv + length <= maxv);

	/* remember this position as a hint for next time. */
	vmp->vm_region_top = startv + length;

	return startv;
}

/*===========================================================================*
 *				region_find_slot			     *
 *===========================================================================*/
static vir_bytes region_find_slot(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	vir_bytes v, hint = vmp->vm_region_top;

	/* use the top of the last inserted region as a minv hint if
	 * possible. remember that a zero maxv is a special case.
	 */
	if(maxv && hint < maxv && hint >= minv) {
		v = region_find_slot_range(vmp, minv, hint, length);

		if(v != SLOT_FAIL)
			return v;
	}

	return region_find_slot_range(vmp, minv, maxv, length);
}
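
/*
 * Usage sketch: callers such as map_page_region() below ask for any
 * page-aligned, length-byte range within [minv, maxv):
 *
 *	vir_bytes v = region_find_slot(vmp, minv, maxv, length);
 *	if(v == SLOT_FAIL)
 *		... no free range of that size ...
 *
 * Passing maxv == 0 means "place it at exactly minv":
 * region_find_slot_range() then narrows the window to [minv, minv+length).
 */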

static unsigned int phys_slot(vir_bytes len)
{
	assert(!(len % VM_PAGE_SIZE));
	return len / VM_PAGE_SIZE;
}
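
/* For example, phys_slot(3 * VM_PAGE_SIZE) == 3: page-aligned byte
 * lengths and offsets translate to physblocks[] slot counts and indices.
 */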

static struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv, vir_bytes length,
	int flags, mem_type_t *memtype)
{
	struct vir_region *newregion;
	struct phys_region **newphysregions;
	static u32_t id;
	int slots = phys_slot(length);

	if(!(SLABALLOC(newregion))) {
		printf("vm: region_new: could not allocate\n");
		return NULL;
	}

	/* Fill in node details. */
	USE(newregion,
		memset(newregion, 0, sizeof(*newregion));
		newregion->vaddr = startv;
		newregion->length = length;
		newregion->flags = flags;
		newregion->def_memtype = memtype;
		newregion->remaps = 0;
		newregion->id = id++;
		newregion->lower = newregion->higher = NULL;
		newregion->parent = vmp;);

	if(!(newphysregions = calloc(slots, sizeof(struct phys_region *)))) {
		printf("VM: region_new: allocating phys blocks failed\n");
		SLABFREE(newregion);
		return NULL;
	}

	USE(newregion, newregion->physblocks = newphysregions;);

	return newregion;
}

/*===========================================================================*
 *				map_page_region				     *
 *===========================================================================*/
struct vir_region *map_page_region(struct vmproc *vmp, vir_bytes minv,
	vir_bytes maxv, vir_bytes length, u32_t flags, int mapflags,
	mem_type_t *memtype)
{
	struct vir_region *newregion;
	vir_bytes startv;

	assert(!(length % VM_PAGE_SIZE));

	SANITYCHECK(SCL_FUNCTIONS);

	startv = region_find_slot(vmp, minv, maxv, length);
	if (startv == SLOT_FAIL)
		return NULL;

	/* Now we want a new region. */
	if(!(newregion = region_new(vmp, startv, length, flags, memtype))) {
		printf("VM: map_page_region: allocating region failed\n");
		return NULL;
	}

	/* If a new event is specified, invoke it. */
	if(newregion->def_memtype->ev_new) {
		if(newregion->def_memtype->ev_new(newregion) != OK) {
			/* ev_new will have freed and removed the region */
			return NULL;
		}
	}

	if(mapflags & MF_PREALLOC) {
		if(map_handle_memory(vmp, newregion, 0, length, 1,
			NULL, 0, 0) != OK) {
			printf("VM: map_page_region: prealloc failed\n");
			map_free(newregion);
			return NULL;
		}
	}

	/* Pre-allocations should be uninitialized, but after that it's a
	 * different story.
	 */
	USE(newregion, newregion->flags &= ~VR_UNINITIALIZED;);

	/* Link it. */
	region_insert(&vmp->vm_regions_avl, newregion);

#if SANITYCHECKS
	assert(startv == newregion->vaddr);
	{
		struct vir_region *nextvr;
		if((nextvr = getnextvr(newregion))) {
			assert(newregion->vaddr < nextvr->vaddr);
		}
	}
#endif

	SANITYCHECK(SCL_FUNCTIONS);

	return newregion;
}

/*===========================================================================*
 *				map_subfree				     *
 *===========================================================================*/
static int map_subfree(struct vir_region *region,
	vir_bytes start, vir_bytes len)
{
	struct phys_region *pr;
	vir_bytes end = start+len;
	vir_bytes voffset;

#if SANITYCHECKS
	SLABSANE(region);
	for(voffset = 0; voffset < region->length;
		voffset += VM_PAGE_SIZE) {
		struct phys_region *others;
		struct phys_block *pb;

		if(!(pr = physblock_get(region, voffset)))
			continue;

		pb = pr->ph;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
		}
	}
#endif

	for(voffset = start; voffset < end; voffset+=VM_PAGE_SIZE) {
		if(!(pr = physblock_get(region, voffset)))
			continue;
		assert(pr->offset >= start);
		assert(pr->offset < end);
		pb_unreferenced(region, pr, 1);
		SLABFREE(pr);
	}

	return OK;
}

/*===========================================================================*
 *				map_free				     *
 *===========================================================================*/
int map_free(struct vir_region *region)
{
	int r;

	if((r=map_subfree(region, 0, region->length)) != OK) {
		printf("%d\n", __LINE__);
		return r;
	}

	if(region->def_memtype->ev_delete)
		region->def_memtype->ev_delete(region);
	free(region->physblocks);
	region->physblocks = NULL;
	SLABFREE(region);

	return OK;
}

/*========================================================================*
 *				map_free_proc				  *
 *========================================================================*/
int map_free_proc(struct vmproc *vmp)
{
	struct vir_region *r;

	while((r = region_search_root(&vmp->vm_regions_avl))) {
		SANITYCHECK(SCL_DETAIL);
#if SANITYCHECKS
		nocheck++;
#endif
		region_remove(&vmp->vm_regions_avl, r->vaddr); /* For sanity checks. */
		map_free(r);
#if SANITYCHECKS
		nocheck--;
#endif
		SANITYCHECK(SCL_DETAIL);
	}

	region_init(&vmp->vm_regions_avl);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}

/*===========================================================================*
 *				map_lookup				     *
 *===========================================================================*/
struct vir_region *map_lookup(struct vmproc *vmp,
	vir_bytes offset, struct phys_region **physr)
{
	struct vir_region *r;

	SANITYCHECK(SCL_FUNCTIONS);

#if SANITYCHECKS
	if(!region_search_root(&vmp->vm_regions_avl))
		panic("process has no regions: %d", vmp->vm_endpoint);
#endif

	if((r = region_search(&vmp->vm_regions_avl, offset, AVL_LESS_EQUAL))) {
		vir_bytes ph;
		if(offset >= r->vaddr && offset < r->vaddr + r->length) {
			ph = offset - r->vaddr;
			if(physr) {
				*physr = physblock_get(r, ph);
				if(*physr) assert((*physr)->offset == ph);
			}
			return r;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return NULL;
}

u32_t vrallocflags(u32_t flags)
{
	u32_t allocflags = 0;

	if(flags & VR_PHYS64K)
		allocflags |= PAF_ALIGN64K;
	if(flags & VR_LOWER16MB)
		allocflags |= PAF_LOWER16MB;
	if(flags & VR_LOWER1MB)
		allocflags |= PAF_LOWER1MB;
	if(!(flags & VR_UNINITIALIZED))
		allocflags |= PAF_CLEAR;

	return allocflags;
}
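
/*
 * For instance, a region created with VR_LOWER1MB and without
 * VR_UNINITIALIZED yields PAF_LOWER1MB | PAF_CLEAR: the allocator must
 * hand back zeroed pages below 1MB. Only VR_UNINITIALIZED regions may
 * receive uncleared pages.
 */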

/*===========================================================================*
 *				map_pf					     *
 *===========================================================================*/
int map_pf(struct vmproc *vmp,
	struct vir_region *region,
	vir_bytes offset,
	int write,
	vfs_callback_t pf_callback,
	void *state,
	int len,
	int *io)
{
	struct phys_region *ph;
	int r = OK;

	offset -= offset % VM_PAGE_SIZE;

	/* assert(offset >= 0); */ /* always true */
	assert(offset < region->length);

	assert(!(region->vaddr % VM_PAGE_SIZE));
	assert(!(write && !(region->flags & VR_WRITABLE)));

	SANITYCHECK(SCL_FUNCTIONS);

	if(!(ph = physblock_get(region, offset))) {
		struct phys_block *pb;

		/* New block. */

		if(!(pb = pb_new(MAP_NONE))) {
			printf("map_pf: pb_new failed\n");
			return ENOMEM;
		}

		if(!(ph = pb_reference(pb, offset, region,
			region->def_memtype))) {
			printf("map_pf: pb_reference failed\n");
			pb_free(pb);
			return ENOMEM;
		}
	}

	assert(ph);
	assert(ph->ph);

	/* If we're writing and the block is already
	 * writable, nothing to do.
	 */

	assert(ph->memtype->writable);

	if(!write || !ph->memtype->writable(ph)) {
		assert(ph->memtype->ev_pagefault);
		assert(ph->ph);

		if((r = ph->memtype->ev_pagefault(vmp,
			region, ph, write, pf_callback, state, len, io)) == SUSPEND) {
			return SUSPEND;
		}

		if(r != OK) {
#if 0
			printf("map_pf: pagefault in %s failed\n", ph->memtype->name);
#endif
			if(ph)
				pb_unreferenced(region, ph, 1);
			return r;
		}

		assert(ph);
		assert(ph->ph);
		assert(ph->ph->phys != MAP_NONE);
	}

	assert(ph->ph);
	assert(ph->ph->phys != MAP_NONE);

	if((r = map_ph_writept(vmp, region, ph)) != OK) {
		printf("map_pf: writept failed\n");
		return r;
	}

	SANITYCHECK(SCL_FUNCTIONS);

#if SANITYCHECKS
	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset,
		VM_PAGE_SIZE, write)) {
		panic("map_pf: pt_checkrange failed: %d", r);
	}
#endif

	return r;
}

int map_handle_memory(struct vmproc *vmp,
	struct vir_region *region, vir_bytes start_offset, vir_bytes length,
	int write, vfs_callback_t cb, void *state, int statelen)
{
	vir_bytes offset, lim;
	int r;
	int io = 0;

	assert(length > 0);
	lim = start_offset + length;
	assert(lim > start_offset);

	for(offset = start_offset; offset < lim; offset += VM_PAGE_SIZE)
		if((r = map_pf(vmp, region, offset, write,
			cb, state, statelen, &io)) != OK)
			return r;

	return OK;
}
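
/*
 * Usage sketch: to fault in an entire region as present and writable,
 * without a VFS callback, as map_pin_memory() below does per region:
 *
 *	r = map_handle_memory(vmp, vr, 0, vr->length, 1, NULL, 0, 0);
 */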

/*===========================================================================*
 *				map_pin_memory				     *
 *===========================================================================*/
int map_pin_memory(struct vmproc *vmp)
{
	struct vir_region *vr;
	int r;
	region_iter iter;
	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	/* Scan all memory regions. */
	pt_assert(&vmp->vm_pt);
	while((vr = region_get_iter(&iter))) {
		/* Make sure region is mapped to physical memory and writable.*/
		r = map_handle_memory(vmp, vr, 0, vr->length, 1, NULL, 0, 0);
		if(r != OK) {
			panic("map_pin_memory: map_handle_memory failed: %d", r);
		}
		region_incr_iter(&iter);
	}
	pt_assert(&vmp->vm_pt);
	return OK;
}

/*===========================================================================*
 *				map_copy_region				     *
 *===========================================================================*/
struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region *vr)
{
	/* map_copy_region creates a complete copy of the vir_region
	 * data structure, linking in the same phys_blocks directly,
	 * but all in limbo, i.e., the caller has to link the vir_region
	 * to a process. Therefore it doesn't increase the refcount in
	 * the phys_block; the caller has to do this once it's linked.
	 * The reason for this is to keep the sanity checks working
	 * within this function.
	 */
	struct vir_region *newvr;
	struct phys_region *ph;
	int r;
#if SANITYCHECKS
	unsigned int cr;
	cr = physregions(vr);
#endif
	vir_bytes p;

	if(!(newvr = region_new(vr->parent, vr->vaddr, vr->length, vr->flags, vr->def_memtype)))
		return NULL;

	USE(newvr, newvr->parent = vmp;);

	if(vr->def_memtype->ev_copy && (r=vr->def_memtype->ev_copy(vr, newvr)) != OK) {
		map_free(newvr);
		printf("VM: memtype-specific copy failed (%d)\n", r);
		return NULL;
	}

	for(p = 0; p < phys_slot(vr->length); p++) {
		struct phys_region *newph;

		if(!(ph = physblock_get(vr, p*VM_PAGE_SIZE))) continue;
		newph = pb_reference(ph->ph, ph->offset, newvr,
			vr->def_memtype);

		if(!newph) { map_free(newvr); return NULL; }

		if(ph->memtype->ev_reference)
			ph->memtype->ev_reference(ph, newph);

#if SANITYCHECKS
		USE(newph, newph->written = 0;);
		assert(physregions(vr) == cr);
#endif
	}

#if SANITYCHECKS
	assert(physregions(vr) == physregions(newvr));
#endif

	return newvr;
}
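
/*
 * Usage sketch: copy a region, then link it into the destination
 * process, as map_proc_copy_range() below does:
 *
 *	if(!(newvr = map_copy_region(dst, vr)))
 *		return ENOMEM;
 *	region_insert(&dst->vm_regions_avl, newvr);
 */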

/*===========================================================================*
 *				copy_abs2region				     *
 *===========================================================================*/
int copy_abs2region(phys_bytes absaddr, struct vir_region *destregion,
	phys_bytes offset, phys_bytes len)
{
	assert(destregion);
	assert(destregion->physblocks);
	while(len > 0) {
		phys_bytes sublen, suboffset;
		struct phys_region *ph;
		assert(destregion);
		assert(destregion->physblocks);
		if(!(ph = physblock_get(destregion, offset))) {
			printf("VM: copy_abs2region: no phys region found (1).\n");
			return EFAULT;
		}
		assert(ph->offset <= offset);
		if(ph->offset+VM_PAGE_SIZE <= offset) {
			printf("VM: copy_abs2region: no phys region found (2).\n");
			return EFAULT;
		}
		suboffset = offset - ph->offset;
		assert(suboffset < VM_PAGE_SIZE);
		sublen = len;
		if(sublen > VM_PAGE_SIZE - suboffset)
			sublen = VM_PAGE_SIZE - suboffset;
		assert(suboffset + sublen <= VM_PAGE_SIZE);
		if(ph->ph->refcount != 1) {
			printf("VM: copy_abs2region: refcount not 1.\n");
			return EFAULT;
		}

		if(sys_abscopy(absaddr, ph->ph->phys + suboffset, sublen) != OK) {
			printf("VM: copy_abs2region: abscopy failed.\n");
			return EFAULT;
		}
		absaddr += sublen;
		offset += sublen;
		len -= sublen;
	}

	return OK;
}
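
/*
 * Usage sketch: copying len bytes from an absolute physical address
 * srcphys into a region, starting at region offset 0:
 *
 *	if(copy_abs2region(srcphys, destregion, 0, len) != OK)
 *		... copy failed ...
 *
 * Every destination page must already be present and unshared
 * (refcount 1), or the copy fails with EFAULT.
 */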

/*=========================================================================*
 *				map_writept				   *
 *=========================================================================*/
int map_writept(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *ph;
	int r;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		vir_bytes p;
		for(p = 0; p < vr->length; p += VM_PAGE_SIZE) {
			if(!(ph = physblock_get(vr, p))) continue;

			if((r=map_ph_writept(vmp, vr, ph)) != OK) {
				printf("VM: map_writept: failed\n");
				return r;
			}
		}
		region_incr_iter(&v_iter);
	}

	return OK;
}

/*========================================================================*
 *				map_proc_copy				  *
 *========================================================================*/
int map_proc_copy(struct vmproc *dst, struct vmproc *src)
{
	/* Copy all the memory regions from the src process to the dst process. */
	region_init(&dst->vm_regions_avl);

	return map_proc_copy_range(dst, src, NULL, NULL);
}

/*========================================================================*
 *				map_proc_copy_range			  *
 *========================================================================*/
int map_proc_copy_range(struct vmproc *dst, struct vmproc *src,
	struct vir_region *start_src_vr, struct vir_region *end_src_vr)
{
	struct vir_region *vr;
	region_iter v_iter;

	if(!start_src_vr)
		start_src_vr = region_search_least(&src->vm_regions_avl);
	if(!end_src_vr)
		end_src_vr = region_search_greatest(&src->vm_regions_avl);

	assert(start_src_vr && end_src_vr);
	assert(start_src_vr->parent == src);
	region_start_iter(&src->vm_regions_avl, &v_iter,
		start_src_vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter) == start_src_vr);

	/* Copy source regions into the destination. */

	SANITYCHECK(SCL_FUNCTIONS);

	while((vr = region_get_iter(&v_iter))) {
		struct vir_region *newvr;
		if(!(newvr = map_copy_region(dst, vr))) {
			map_free_proc(dst);
			return ENOMEM;
		}
		region_insert(&dst->vm_regions_avl, newvr);
		assert(vr->length == newvr->length);

#if SANITYCHECKS
		{
			vir_bytes vaddr;
			struct phys_region *orig_ph, *new_ph;
			assert(vr->physblocks != newvr->physblocks);
			for(vaddr = 0; vaddr < vr->length; vaddr += VM_PAGE_SIZE) {
				orig_ph = physblock_get(vr, vaddr);
				new_ph = physblock_get(newvr, vaddr);
				if(!orig_ph) { assert(!new_ph); continue;}
				assert(new_ph);
				assert(orig_ph != new_ph);
				assert(orig_ph->ph == new_ph->ph);
			}
		}
#endif
		if(vr == end_src_vr) {
			break;
		}
		region_incr_iter(&v_iter);
	}

	map_writept(src);
	map_writept(dst);

	SANITYCHECK(SCL_FUNCTIONS);
	return OK;
}

int map_region_extend_upto_v(struct vmproc *vmp, vir_bytes v)
{
	vir_bytes offset = v, limit, extralen;
	struct vir_region *vr, *nextvr;
	struct phys_region **newpr;
	int newslots, prevslots, addedslots, r;

	offset = roundup(offset, VM_PAGE_SIZE);

	if(!(vr = region_search(&vmp->vm_regions_avl, offset, AVL_LESS))) {
		printf("VM: nothing to extend\n");
		return ENOMEM;
	}

	if(vr->vaddr + vr->length >= v) return OK;

	limit = vr->vaddr + vr->length;

	assert(vr->vaddr <= offset);
	newslots = phys_slot(offset - vr->vaddr);
	prevslots = phys_slot(vr->length);
	assert(newslots >= prevslots);
	addedslots = newslots - prevslots;
	extralen = offset - limit;
	assert(extralen > 0);

	if((nextvr = getnextvr(vr))) {
		assert(offset <= nextvr->vaddr);
	}

	if(nextvr && nextvr->vaddr < offset) {
		printf("VM: can't grow into next region\n");
		return ENOMEM;
	}

	if(!vr->def_memtype->ev_resize) {
		if(!map_page_region(vmp, limit, 0, extralen,
			VR_WRITABLE | VR_ANON,
			0, &mem_type_anon)) {
			printf("resize: couldn't put anon memory there\n");
			return ENOMEM;
		}
		return OK;
	}

	if(!(newpr = realloc(vr->physblocks,
		newslots * sizeof(struct phys_region *)))) {
		printf("VM: map_region_extend_upto_v: realloc failed\n");
		return ENOMEM;
	}

	vr->physblocks = newpr;
	memset(vr->physblocks + prevslots, 0,
		addedslots * sizeof(struct phys_region *));

	r = vr->def_memtype->ev_resize(vmp, vr, offset - vr->vaddr);

	return r;
}
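
/*
 * Note on the two growth paths above: memory types without an ev_resize
 * handler are grown by mapping a fresh anonymous region directly behind
 * them (maxv == 0 forces placement exactly at 'limit'); resizable types
 * grow in place after physblocks[] is enlarged.
 */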

/*========================================================================*
 *				map_unmap_region			  *
 *========================================================================*/
int map_unmap_region(struct vmproc *vmp, struct vir_region *r,
	vir_bytes offset, vir_bytes len)
{
	/* Unmap 'len' bytes at 'offset' within the region. The unmap may
	 * be at the start or end of the region (shrinking it), or cover
	 * it entirely (freeing it). Unreference memory it used to
	 * reference if any.
	 */
	vir_bytes regionstart;
	int freeslots = phys_slot(len);

	SANITYCHECK(SCL_FUNCTIONS);

	if(offset+len > r->length || (len % VM_PAGE_SIZE)) {
		printf("VM: bogus length 0x%lx\n", len);
		return EINVAL;
	}

	regionstart = r->vaddr + offset;

	/* unreference its memory */
	map_subfree(r, offset, len);

	/* if unmap was at start/end of this region, it actually shrinks */
	if(r->length == len) {
		/* Whole region disappears. Unlink and free it. */
		region_remove(&vmp->vm_regions_avl, r->vaddr);
		map_free(r);
	} else if(offset == 0) {
		struct phys_region *pr;
		vir_bytes voffset;
		int remslots;

		if(!r->def_memtype->ev_lowshrink) {
			printf("VM: low-shrinking not implemented for %s\n",
				r->def_memtype->name);
			return EINVAL;
		}

		if(r->def_memtype->ev_lowshrink(r, len) != OK) {
			printf("VM: low-shrinking failed for %s\n",
				r->def_memtype->name);
			return EINVAL;
		}

		region_remove(&vmp->vm_regions_avl, r->vaddr);

		USE(r,
		r->vaddr += len;);

		remslots = phys_slot(r->length);

		region_insert(&vmp->vm_regions_avl, r);

		/* vaddr has increased; to make all the phys_regions
		 * point to the same addresses, make them shrink by the
		 * same amount.
		 */
		for(voffset = len; voffset < r->length;
			voffset += VM_PAGE_SIZE) {
			if(!(pr = physblock_get(r, voffset))) continue;
			assert(pr->offset >= offset);
			assert(pr->offset >= len);
			USE(pr, pr->offset -= len;);
		}
		if(remslots)
			memmove(r->physblocks, r->physblocks + freeslots,
				remslots * sizeof(struct phys_region *));
		USE(r, r->length -= len;);
	} else if(offset + len == r->length) {
		assert(len <= r->length);
		r->length -= len;
	}

	SANITYCHECK(SCL_DETAIL);

	if(pt_writemap(vmp, &vmp->vm_pt, regionstart,
		MAP_NONE, len, 0, WMF_OVERWRITE) != OK) {
		printf("VM: map_unmap_region: pt_writemap failed\n");
		return ENOMEM;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}

static int split_region(struct vmproc *vmp, struct vir_region *vr,
	struct vir_region **vr1, struct vir_region **vr2, vir_bytes split_len)
{
	struct vir_region *r1 = NULL, *r2 = NULL;
	vir_bytes rem_len = vr->length - split_len;
	int slots1, slots2;
	vir_bytes voffset;
	int n1 = 0, n2 = 0;

	assert(!(split_len % VM_PAGE_SIZE));
	assert(!(rem_len % VM_PAGE_SIZE));
	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(vr->length % VM_PAGE_SIZE));

	if(!vr->def_memtype->ev_split) {
		printf("VM: split region not implemented for %s\n",
			vr->def_memtype->name);
		sys_diagctl_stacktrace(vmp->vm_endpoint);
		return EINVAL;
	}

	slots1 = phys_slot(split_len);
	slots2 = phys_slot(rem_len);

	if(!(r1 = region_new(vmp, vr->vaddr, split_len, vr->flags,
		vr->def_memtype))) {
		goto bail;
	}

	if(!(r2 = region_new(vmp, vr->vaddr+split_len, rem_len, vr->flags,
		vr->def_memtype))) {
		/* bail frees r1 for us. */
		goto bail;
	}

	for(voffset = 0; voffset < r1->length; voffset += VM_PAGE_SIZE) {
		struct phys_region *ph, *phn;
		if(!(ph = physblock_get(vr, voffset))) continue;
		if(!(phn = pb_reference(ph->ph, voffset, r1, ph->memtype)))
			goto bail;
		n1++;
	}

	for(voffset = 0; voffset < r2->length; voffset += VM_PAGE_SIZE) {
		struct phys_region *ph, *phn;
		if(!(ph = physblock_get(vr, split_len + voffset))) continue;
		if(!(phn = pb_reference(ph->ph, voffset, r2, ph->memtype)))
			goto bail;
		n2++;
	}

	vr->def_memtype->ev_split(vmp, vr, r1, r2);

	region_remove(&vmp->vm_regions_avl, vr->vaddr);
	map_free(vr);
	region_insert(&vmp->vm_regions_avl, r1);
	region_insert(&vmp->vm_regions_avl, r2);

	*vr1 = r1;
	*vr2 = r2;

	return OK;

bail:
	if(r1) map_free(r1);
	if(r2) map_free(r2);

	printf("split_region: failed\n");

	return ENOMEM;
}

int map_unmap_range(struct vmproc *vmp, vir_bytes unmap_start, vir_bytes length)
{
	vir_bytes o = unmap_start % VM_PAGE_SIZE, unmap_limit;
	region_iter v_iter;
	struct vir_region *vr, *nextvr;

	unmap_start -= o;
	length += o;
	length = roundup(length, VM_PAGE_SIZE);
	unmap_limit = length + unmap_start;

	if(length < VM_PAGE_SIZE) return EINVAL;
	if(unmap_limit <= unmap_start) return EINVAL;

	region_start_iter(&vmp->vm_regions_avl, &v_iter, unmap_start, AVL_LESS_EQUAL);

	if(!(vr = region_get_iter(&v_iter))) {
		region_start_iter(&vmp->vm_regions_avl, &v_iter, unmap_start, AVL_GREATER);
		if(!(vr = region_get_iter(&v_iter))) {
			return OK;
		}
	}

	assert(vr);

	for(; vr && vr->vaddr < unmap_limit; vr = nextvr) {
		vir_bytes thislimit = vr->vaddr + vr->length;
		vir_bytes this_unmap_start, this_unmap_limit;
		vir_bytes remainlen;
		int r;

		region_incr_iter(&v_iter);
		nextvr = region_get_iter(&v_iter);

		assert(thislimit > vr->vaddr);

		this_unmap_start = MAX(unmap_start, vr->vaddr);
		this_unmap_limit = MIN(unmap_limit, thislimit);

		if(this_unmap_start >= this_unmap_limit) continue;

		if(this_unmap_start > vr->vaddr && this_unmap_limit < thislimit) {
			struct vir_region *vr1, *vr2;
			vir_bytes split_len = this_unmap_limit - vr->vaddr;
			assert(split_len > 0);
			assert(split_len < vr->length);
			if((r=split_region(vmp, vr, &vr1, &vr2, split_len)) != OK) {
				printf("VM: unmap split failed\n");
				return r;
			}
			vr = vr1;
			thislimit = vr->vaddr + vr->length;
		}

		remainlen = this_unmap_limit - vr->vaddr;

		assert(this_unmap_start >= vr->vaddr);
		assert(this_unmap_limit <= thislimit);
		assert(remainlen > 0);

		r = map_unmap_region(vmp, vr, this_unmap_start - vr->vaddr,
			this_unmap_limit - this_unmap_start);

		if(r != OK) {
			printf("map_unmap_range: map_unmap_region failed\n");
			return r;
		}

		if(nextvr) {
			region_start_iter(&vmp->vm_regions_avl, &v_iter, nextvr->vaddr, AVL_EQUAL);
			assert(region_get_iter(&v_iter) == nextvr);
		}
	}

	return OK;
}
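
/*
 * Usage sketch: unmapping one page from the middle of a three-page
 * region splits it via split_region() and then shrinks the first half
 * from its end:
 *
 *	map_unmap_range(vmp, vr->vaddr + VM_PAGE_SIZE, VM_PAGE_SIZE);
 */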

/*========================================================================*
 *			map_region_lookup_type				  *
 *========================================================================*/
struct vir_region* map_region_lookup_type(struct vmproc *vmp, u32_t type)
{
	struct vir_region *vr;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		region_incr_iter(&v_iter);
		if(vr->flags & type)
			return vr;
	}

	return NULL;
}

/*========================================================================*
 *				map_get_phys				  *
 *========================================================================*/
int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
{
	struct vir_region *vr;

	if (!(vr = map_lookup(vmp, addr, NULL)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!vr->def_memtype->regionid)
		return EINVAL;

	if(r)
		*r = vr->def_memtype->regionid(vr);

	return OK;
}

/*========================================================================*
 *				map_get_ref				  *
 *========================================================================*/
int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
{
	struct vir_region *vr;

	if (!(vr = map_lookup(vmp, addr, NULL)) ||
		(vr->vaddr != addr) || !vr->def_memtype->refcount)
		return EINVAL;

	if (cnt)
		*cnt = vr->def_memtype->refcount(vr);

	return OK;
}

void get_usage_info_kernel(struct vm_usage_info *vui)
{
	memset(vui, 0, sizeof(*vui));
	vui->vui_total = kernel_boot_info.kernel_allocated_bytes +
		kernel_boot_info.kernel_allocated_bytes_dynamic;
	/* All of the kernel's pages are actually mapped in. */
	vui->vui_virtual = vui->vui_mvirtual = vui->vui_total;
}

static void get_usage_info_vm(struct vm_usage_info *vui)
{
	memset(vui, 0, sizeof(*vui));
	vui->vui_total = kernel_boot_info.vm_allocated_bytes +
		get_vm_self_pages() * VM_PAGE_SIZE;
	/* All of VM's pages are actually mapped in. */
	vui->vui_virtual = vui->vui_mvirtual = vui->vui_total;
}

/*
 * Return whether the given region is for the associated process's stack.
 * Unfortunately, we do not actually have this information: in most cases, VM
 * is not responsible for actually setting up the stack in the first place.
 * Fortunately, this is only for statistical purposes, so we can get away with
 * guess work. However, it is certainly not accurate in the light of userspace
 * thread stacks, or if the process is messing with its stack in any way, or if
 * (currently) VFS decides to put the stack elsewhere, etcetera.
 */
static int
is_stack_region(struct vir_region * vr)
{

	return (vr->vaddr == VM_STACKTOP - DEFAULT_STACK_LIMIT &&
		vr->length == DEFAULT_STACK_LIMIT);
}

/*========================================================================*
 *				get_usage_info				  *
 *========================================================================*/
void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
{
	struct vir_region *vr;
	struct phys_region *ph;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);
	vir_bytes voffset;

	memset(vui, 0, sizeof(*vui));

	if(vmp->vm_endpoint == VM_PROC_NR) {
		get_usage_info_vm(vui);
		return;
	}

	if(vmp->vm_endpoint < 0) {
		get_usage_info_kernel(vui);
		return;
	}

	while((vr = region_get_iter(&v_iter))) {
		vui->vui_virtual += vr->length;
		vui->vui_mvirtual += vr->length;
		for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
			if(!(ph = physblock_get(vr, voffset))) {
				/* mvirtual: discount unmapped stack pages. */
				if (is_stack_region(vr))
					vui->vui_mvirtual -= VM_PAGE_SIZE;
				continue;
			}
			/* All present pages are counted towards the total. */
			vui->vui_total += VM_PAGE_SIZE;

			if (ph->ph->refcount > 1) {
				/* Any page with a refcount > 1 is common. */
				vui->vui_common += VM_PAGE_SIZE;

				/* Any common, non-COW page is shared. */
				if (vr->flags & VR_SHARED)
					vui->vui_shared += VM_PAGE_SIZE;
			}
		}
		region_incr_iter(&v_iter);
	}

	/*
	 * Also include getrusage resource information, so that the MIB service
	 * need not make more than one call to VM for each process entry.
	 */
	vui->vui_maxrss = vmp->vm_total_max / 1024L;
	vui->vui_minflt = vmp->vm_minor_page_fault;
	vui->vui_majflt = vmp->vm_major_page_fault;
}

/*===========================================================================*
 *				get_region_info				     *
 *===========================================================================*/
int get_region_info(struct vmproc *vmp, struct vm_region_info *vri,
	int max, vir_bytes *nextp)
{
	struct vir_region *vr;
	vir_bytes next;
	int count;
	region_iter v_iter;

	next = *nextp;

	if (!max) return 0;

	region_start_iter(&vmp->vm_regions_avl, &v_iter, next, AVL_GREATER_EQUAL);
	if(!(vr = region_get_iter(&v_iter))) return 0;

	for(count = 0; (vr = region_get_iter(&v_iter)) && count < max;
		region_incr_iter(&v_iter)) {
		struct phys_region *ph1 = NULL, *ph2 = NULL;
		vir_bytes voffset;

		/* where to start on next iteration, regardless of what we find now */
		next = vr->vaddr + vr->length;

		/* Report the part of the region that's actually in use. */

		/* Get first and last phys_regions, if any */
		for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
			struct phys_region *ph;
			if(!(ph = physblock_get(vr, voffset))) continue;
			if(!ph1) ph1 = ph;
			ph2 = ph;
		}

		if(!ph1 || !ph2) {
			printf("skipping empty region 0x%lx-0x%lx\n",
				vr->vaddr, vr->vaddr+vr->length);
			continue;
		}

		/* Report start+length of region starting from lowest use. */
		vri->vri_addr = vr->vaddr + ph1->offset;
		vri->vri_prot = PROT_READ;
		vri->vri_length = ph2->offset + VM_PAGE_SIZE - ph1->offset;

		/* "AND" the provided protection with per-page protection. */
		if (vr->flags & VR_WRITABLE)
			vri->vri_prot |= PROT_WRITE;
		count++;
		vri++;
	}

	*nextp = next;
	return count;
}

/*========================================================================*
 *				printregionstats			  *
 *========================================================================*/
void printregionstats(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *pr;
	vir_bytes used = 0, weighted = 0;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		vir_bytes voffset;
		region_incr_iter(&v_iter);
		if(vr->flags & VR_DIRECT)
			continue;
		for(voffset = 0; voffset < vr->length; voffset+=VM_PAGE_SIZE) {
			if(!(pr = physblock_get(vr, voffset))) continue;
			used += VM_PAGE_SIZE;
			weighted += VM_PAGE_SIZE / pr->ph->refcount;
		}
	}

	printf("%6lukB %6lukB\n", used/1024, weighted/1024);

	return;
}

void map_setparent(struct vmproc *vmp)
{
	region_iter iter;
	struct vir_region *vr;
	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		USE(vr, vr->parent = vmp;);
		region_incr_iter(&iter);
	}
}

unsigned int physregions(struct vir_region *vr)
{
	unsigned int n = 0;
	vir_bytes voffset;
	for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
		if(physblock_get(vr, voffset))
			n++;
	}
	return n;
}