/* minix.git: servers/vm/region.c */
#define _SYSTEM 1

#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>
#include <minix/hash.h>
#include <machine/multiboot.h>

#include <sys/mman.h>

#include <limits.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <stdint.h>
#include <memory.h>
#include <sys/param.h>

#include "vm.h"
#include "proto.h"
#include "util.h"
#include "glo.h"
#include "region.h"
#include "sanitycheck.h"
#include "physravl.h"
#include "memlist.h"

/* LRU list. */
static yielded_t *lru_youngest = NULL, *lru_oldest = NULL;

/* Should a physblock be mapped writable? */
#define WRITABLE(r, pb) \
	(((r)->flags & VR_WRITABLE) && \
		(((r)->flags & (VR_DIRECT | VR_SHARED)) || \
		 (pb)->refcount == 1))
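/* Editor's note (not part of the original source): a page is thus mapped
 * read-write only if its region is writable AND either the mapping is
 * direct or shared, or this phys_block has exactly one reference. A COW
 * block with refcount > 1 is mapped read-only, so the first write faults
 * into map_pf(), which clones the block via map_clone_ph_block().
 */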
static int map_new_physblock(struct vmproc *vmp, struct vir_region
	*region, vir_bytes offset, vir_bytes length, phys_bytes what, u32_t
	allocflags, int written);

static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr);

static phys_bytes freeyieldednode(yielded_t *node, int freemem);

static struct vir_region *map_copy_region(struct vmproc *vmp, struct
	vir_region *vr);

static struct phys_region *map_clone_ph_block(struct vmproc *vmp,
	struct vir_region *region, struct phys_region *ph, physr_iter *iter);

#if SANITYCHECKS
static void lrucheck(void);
#endif

/* hash table of yielded blocks */
#define YIELD_HASHSIZE 65536
static yielded_avl vm_yielded_blocks[YIELD_HASHSIZE];

static int avl_inited = 0;

void map_region_init(void)
{
	int h;
	assert(!avl_inited);
	for(h = 0; h < YIELD_HASHSIZE; h++)
		yielded_init(&vm_yielded_blocks[h]);
	avl_inited = 1;
}

static yielded_avl *get_yielded_avl(block_id_t id)
{
	u32_t h;

	assert(avl_inited);

	hash_i_64(id.owner, id.id, h);
	h = h % YIELD_HASHSIZE;

	assert(h >= 0);
	assert(h < YIELD_HASHSIZE);

	return &vm_yielded_blocks[h];
}

static char *map_name(struct vir_region *vr)
{
	static char name[100];
	char *typename, *tag;
	int type = vr->flags & (VR_ANON|VR_DIRECT);
	switch(type) {
		case VR_ANON:
			typename = "anonymous";
			break;
		case VR_DIRECT:
			typename = "direct";
			break;
		default:
			panic("unknown mapping type: %d", type);
	}

	switch(vr->tag) {
		case VRT_TEXT:
			tag = "text";
			break;
		case VRT_STACK:
			tag = "stack";
			break;
		case VRT_HEAP:
			tag = "heap";
			break;
		case VRT_NONE:
			tag = "untagged";
			break;
		default:
			tag = "unknown tag value";
			break;
	}

	sprintf(name, "%s, %s", typename, tag);

	return name;
}

void map_printregion(struct vmproc *vmp, struct vir_region *vr)
{
	physr_iter iter;
	struct phys_region *ph;
	printf("map_printmap: map_name: %s\n", map_name(vr));
	printf("\t%lx (len 0x%lx, %lukB), %p\n",
		vr->vaddr, vr->length, vr->length/1024, map_name(vr));
	printf("\t\tphysblocks:\n");
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		printf("\t\t@ %lx (refs %d): phys 0x%lx\n",
			(vr->vaddr + ph->offset),
			ph->ph->refcount, ph->ph->phys);
		physr_incr_iter(&iter);
	}
}

/*===========================================================================*
 *				map_printmap				     *
 *===========================================================================*/
void map_printmap(vmp)
struct vmproc *vmp;
{
	struct vir_region *vr;
	region_iter iter;

	printf("memory regions in process %d:\n", vmp->vm_endpoint);

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		map_printregion(vmp, vr);
		region_incr_iter(&iter);
	}
}

static struct vir_region *getnextvr(struct vir_region *vr)
{
	struct vir_region *nextvr;
	region_iter v_iter;
	SLABSANE(vr);
	region_start_iter(&vr->parent->vm_regions_avl, &v_iter, vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter));
	assert(region_get_iter(&v_iter) == vr);
	region_incr_iter(&v_iter);
	nextvr = region_get_iter(&v_iter);
	if(!nextvr) return NULL;
	SLABSANE(nextvr);
	assert(vr->parent == nextvr->parent);
	assert(vr->vaddr < nextvr->vaddr);
	assert(vr->vaddr + vr->length <= nextvr->vaddr);
	return nextvr;
}

#if SANITYCHECKS

/*===========================================================================*
 *				map_sanitycheck_pt			     *
 *===========================================================================*/
static int map_sanitycheck_pt(struct vmproc *vmp,
	struct vir_region *vr, struct phys_region *pr)
{
	struct phys_block *pb = pr->ph;
	int rw;
	int r;

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	r = pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
		pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw, WMF_VERIFY);

	if(r != OK) {
		printf("proc %d phys_region 0x%lx sanity check failed\n",
			vmp->vm_endpoint, pr->offset);
		map_printregion(vmp, vr);
	}

	return r;
}

/*===========================================================================*
 *				map_sanitycheck				     *
 *===========================================================================*/
void map_sanitycheck(char *file, int line)
{
	struct vmproc *vmp;

	lrucheck();

	/* Macro for looping over all physical blocks of all regions of
	 * all processes.
	 */
#define ALLREGIONS(regioncode, physcode)				\
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) {		\
		region_iter v_iter;					\
		struct vir_region *vr;					\
		if(!(vmp->vm_flags & VMF_INUSE))			\
			continue;					\
		region_start_iter_least(&vmp->vm_regions_avl, &v_iter);	\
		while((vr = region_get_iter(&v_iter))) {		\
			physr_iter iter;				\
			struct phys_region *pr;				\
			regioncode;					\
			physr_start_iter_least(vr->phys, &iter);	\
			while((pr = physr_get_iter(&iter))) {		\
				physcode;				\
				physr_incr_iter(&iter);			\
			}						\
			region_incr_iter(&v_iter);			\
		}							\
	}

#define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
	/* Basic pointers check. */
	ALLREGIONS(MYSLABSANE(vr),MYSLABSANE(pr); MYSLABSANE(pr->ph);MYSLABSANE(pr->parent));
	ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr->parent == vr););

	/* Do counting for consistency check. */
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
		if(pr->ph->seencount == 1) {
			if(!(pr->parent->flags & VR_DIRECT)) {
				MYASSERT(usedpages_add(pr->ph->phys,
					VM_PAGE_SIZE) == OK);
			}
		}
	);

	/* Do consistency check. */
	ALLREGIONS({ struct vir_region *nextvr = getnextvr(vr);
		if(nextvr) {
			MYASSERT(vr->vaddr < nextvr->vaddr);
			MYASSERT(vr->vaddr + vr->length <= nextvr->vaddr);
		}
		}
		MYASSERT(!(vr->vaddr % VM_PAGE_SIZE));,
		if(pr->ph->refcount != pr->ph->seencount) {
			map_printmap(vmp);
			printf("ph in vr %p: 0x%lx refcount %u "
				"but seencount %u\n",
				vr, pr->offset,
				pr->ph->refcount, pr->ph->seencount);
		}
		{
			int n_others = 0;
			struct phys_region *others;
			if(pr->ph->refcount > 0) {
				MYASSERT(pr->ph->firstregion);
				if(pr->ph->refcount == 1) {
					MYASSERT(pr->ph->firstregion == pr);
				}
			} else {
				MYASSERT(!pr->ph->firstregion);
			}
			for(others = pr->ph->firstregion; others;
				others = others->next_ph_list) {
				MYSLABSANE(others);
				MYASSERT(others->ph == pr->ph);
				n_others++;
			}
			MYASSERT(pr->ph->refcount == n_others);
		}
		MYASSERT(pr->ph->refcount == pr->ph->seencount);
		MYASSERT(!(pr->offset % VM_PAGE_SIZE)););
	ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp, vr, pr) == OK));
}

#define LRUCHECK lrucheck()

static void lrucheck(void)
{
	yielded_t *list;

	/* list is empty and ok if both ends point to null. */
	if(!lru_youngest && !lru_oldest)
		return;

	/* if not, both should point to something. */
	SLABSANE(lru_youngest);
	SLABSANE(lru_oldest);

	assert(!lru_youngest->younger);
	assert(!lru_oldest->older);

	for(list = lru_youngest; list; list = list->older) {
		SLABSANE(list);
		if(list->younger) {
			SLABSANE(list->younger);
			assert(list->younger->older == list);
		} else assert(list == lru_youngest);
		if(list->older) {
			SLABSANE(list->older);
			assert(list->older->younger == list);
		} else assert(list == lru_oldest);
	}
}

void blockstats(void)
{
	yielded_t *list;
	int blocks = 0;
	phys_bytes mem = 0;
	clock_t ticks;
	int s;

	s = getuptime(&ticks);

	assert(s == OK);

	LRUCHECK;

	for(list = lru_youngest; list; list = list->older) {
		mem += VM_PAGE_SIZE;
		blocks++;
	}

	if(blocks > 0)
		printf("%d blocks, %lukB; ", blocks, mem/1024);

	printmemstats();
}
#else
#define LRUCHECK
#endif

/*=========================================================================*
 *				map_ph_writept				   *
 *=========================================================================*/
static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr)
{
	int rw;
	struct phys_block *pb = pr->ph;

	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(pr->offset % VM_PAGE_SIZE));
	assert(pb->refcount > 0);

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	if(pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
			pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw,
#if SANITYCHECKS
			!pr->written ? 0 :
#endif
			WMF_OVERWRITE) != OK) {
		printf("VM: map_writept: pt_writemap failed\n");
		return ENOMEM;
	}

#if SANITYCHECKS
	USE(pr, pr->written = 1;);
#endif

	return OK;
}

#define SLOT_FAIL ((vir_bytes) -1)

/*===========================================================================*
 *				region_find_slot_range			     *
 *===========================================================================*/
static vir_bytes region_find_slot_range(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	struct vir_region *lastregion;
	vir_bytes startv = 0;
	int foundflag = 0;
	region_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Length must be reasonable. */
	assert(length > 0);

	/* Special case: allow caller to set maxv to 0 meaning 'I want
	 * it to be mapped in right here.'
	 */
	if(maxv == 0) {
		maxv = minv + length;

		/* Sanity check. */
		if(maxv <= minv) {
			printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
				minv, length);
			return SLOT_FAIL;
		}
	}

	/* Basic input sanity checks. */
	assert(!(length % VM_PAGE_SIZE));
	if(minv >= maxv) {
		printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
			minv, maxv, length);
	}
	assert(minv < maxv);

	if(minv + length > maxv)
		return SLOT_FAIL;

#define FREEVRANGE_TRY(rangestart, rangeend) {			\
	vir_bytes frstart = (rangestart), frend = (rangeend);	\
	frstart = MAX(frstart, minv);				\
	frend = MIN(frend, maxv);				\
	if(frend > frstart && (frend - frstart) >= length) {	\
		startv = frend-length;				\
		foundflag = 1;					\
	} }

#define FREEVRANGE(start, end) {					\
	assert(!foundflag);						\
	FREEVRANGE_TRY(((start)+VM_PAGE_SIZE), ((end)-VM_PAGE_SIZE));	\
	if(!foundflag) {						\
		FREEVRANGE_TRY((start), (end));				\
	} }

	/* find region after maxv. */
	region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_GREATER_EQUAL);
	lastregion = region_get_iter(&iter);

	if(!lastregion) {
		/* This is the free virtual address space after the last region. */
		region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_LESS);
		lastregion = region_get_iter(&iter);
		FREEVRANGE(lastregion ?
			lastregion->vaddr+lastregion->length : 0, VM_DATATOP);
	}

	if(!foundflag) {
		struct vir_region *vr;
		while((vr = region_get_iter(&iter)) && !foundflag) {
			struct vir_region *nextvr;
			region_decr_iter(&iter);
			nextvr = region_get_iter(&iter);
			FREEVRANGE(nextvr ? nextvr->vaddr+nextvr->length : 0,
				vr->vaddr);
		}
	}

	if(!foundflag) {
		printf("VM: region_find_slot: no 0x%lx bytes found for %d between 0x%lx and 0x%lx\n",
			length, vmp->vm_endpoint, minv, maxv);
		util_stacktrace();
		return SLOT_FAIL;
	}

	/* However we got it, startv must be in the requested range. */
	assert(startv >= minv);
	assert(startv < maxv);
	assert(startv + length <= maxv);

	/* remember this position as a hint for next time. */
	vmp->vm_region_top = startv + length;

	return startv;
}

/*===========================================================================*
 *				region_find_slot			     *
 *===========================================================================*/
static vir_bytes region_find_slot(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	vir_bytes v, hint = vmp->vm_region_top;

	/* use the top of the last inserted region as a minv hint if
	 * possible. remember that a zero maxv is a special case.
	 */

	if(maxv && hint < maxv && hint >= minv) {
		v = region_find_slot_range(vmp, minv, hint, length);

		if(v != SLOT_FAIL)
			return v;
	}

	return region_find_slot_range(vmp, minv, maxv, length);
}
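/* Editor's sketch (not part of the original source): typical use of
 * region_find_slot(), assuming a valid vmp. The slot is carved from the top
 * of the first fitting gap below maxv (startv = frend - length), which is
 * what makes the vm_region_top hint above profitable. The wrapper function
 * itself is hypothetical.
 */
#if 0
static void region_find_slot_example(struct vmproc *vmp)
{
	vir_bytes startv;
	startv = region_find_slot(vmp, VM_PAGE_SIZE, VM_DATATOP,
		4*VM_PAGE_SIZE);
	if(startv == SLOT_FAIL)
		printf("VM: no 4-page slot available\n");
}
#endif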
struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv, vir_bytes length, int flags)
{
	physr_avl *phavl;
	struct vir_region *newregion;

	if(!(SLABALLOC(newregion))) {
		printf("vm: region_new: could not allocate\n");
		return NULL;
	}

	/* Fill in node details. */
	USE(newregion,
		newregion->vaddr = startv;
		newregion->length = length;
		newregion->flags = flags;
		newregion->tag = VRT_NONE;
		newregion->lower = newregion->higher = NULL;
		newregion->parent = vmp;);

	SLABALLOC(phavl);
	if(!phavl) {
		printf("VM: region_new: allocating phys avl failed\n");
		SLABFREE(newregion);
		return NULL;
	}
	USE(newregion, newregion->phys = phavl;);
	physr_init(newregion->phys);

	return newregion;
}

/*===========================================================================*
 *				map_page_region				     *
 *===========================================================================*/
struct vir_region *map_page_region(vmp, minv, maxv, length,
	what, flags, mapflags)
struct vmproc *vmp;
vir_bytes minv;
vir_bytes maxv;
vir_bytes length;
vir_bytes what;
u32_t flags;
int mapflags;
{
	struct vir_region *newregion;
	vir_bytes startv;

	assert(!(length % VM_PAGE_SIZE));

	SANITYCHECK(SCL_FUNCTIONS);

	if((flags & VR_CONTIG) && !(mapflags & MF_PREALLOC)) {
		printf("map_page_region: can't make contiguous allocation without preallocating\n");
		return NULL;
	}

	startv = region_find_slot(vmp, minv, maxv, length);
	if (startv == SLOT_FAIL)
		return NULL;

	/* Now we want a new region. */
	if(!(newregion = region_new(vmp, startv, length, flags))) {
		printf("VM: map_page_region: allocating region failed\n");
		return NULL;
	}

	/* If we know what we're going to map to, map it right away. */
	if(what != MAP_NONE) {
		assert(!(what % VM_PAGE_SIZE));
		assert(!(startv % VM_PAGE_SIZE));
		assert(!(mapflags & MF_PREALLOC));
		if(map_new_physblock(vmp, newregion, 0, length,
			what, PAF_CLEAR, 0) != OK) {
			printf("VM: map_new_physblock failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	if((flags & VR_ANON) && (mapflags & MF_PREALLOC)) {
		if(map_handle_memory(vmp, newregion, 0, length, 1) != OK) {
			printf("VM: map_page_region: prealloc failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	/* Pre-allocations should be uninitialized, but after that it's a
	 * different story.
	 */
	USE(newregion, newregion->flags &= ~VR_UNINITIALIZED;);

	/* Link it. */
	region_insert(&vmp->vm_regions_avl, newregion);

#if SANITYCHECKS
	assert(startv == newregion->vaddr);
	{
		struct vir_region *nextvr;
		if((nextvr = getnextvr(newregion))) {
			assert(newregion->vaddr < nextvr->vaddr);
		}
	}
#endif

	SANITYCHECK(SCL_FUNCTIONS);

	return newregion;
}
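/* Editor's sketch (not part of the original source): creating a writable,
 * pre-faulted anonymous region anywhere in the data range. The flag values
 * are the ones used elsewhere in this file; the wrapper function itself is
 * hypothetical.
 */
#if 0
static struct vir_region *anon_region_example(struct vmproc *vmp)
{
	return map_page_region(vmp, VM_PAGE_SIZE, VM_DATATOP,
		16*VM_PAGE_SIZE, MAP_NONE, VR_ANON | VR_WRITABLE,
		MF_PREALLOC);
}
#endif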
static struct phys_region *reset_physr_iter(struct vir_region *region,
	physr_iter *iter, vir_bytes offset)
{
	struct phys_region *ph;

	physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
	ph = physr_get_iter(iter);
	assert(ph);
	assert(ph->offset == offset);

	return ph;
}

/*===========================================================================*
 *				map_subfree				     *
 *===========================================================================*/
static int map_subfree(struct vir_region *region,
	vir_bytes start, vir_bytes len)
{
	struct phys_region *pr;
	physr_iter iter;
	vir_bytes end = start+len;

	int full = 0;

#if SANITYCHECKS
	SLABSANE(region);
	SLABSANE(region->phys);
	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		struct phys_region *others;
		struct phys_block *pb;

		pb = pr->ph;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
		}
		physr_incr_iter(&iter);
	}
#endif

	if(start == 0 && len == region->length)
		full = 1;

	physr_init_iter(&iter);
	physr_start_iter(region->phys, &iter, start, AVL_GREATER_EQUAL);
	while((pr = physr_get_iter(&iter))) {
		physr_incr_iter(&iter);
		if(pr->offset >= end)
			break;
		pb_unreferenced(region, pr, !full);
		if(!full) {
			physr_start_iter(region->phys, &iter,
				pr->offset, AVL_GREATER_EQUAL);
		}
		SLABFREE(pr);
	}

	if(full)
		physr_init(region->phys);

	return OK;
}

/*===========================================================================*
 *				map_free				     *
 *===========================================================================*/
static int map_free(struct vir_region *region)
{
	int r;

	if((r=map_subfree(region, 0, region->length)) != OK) {
		printf("%d\n", __LINE__);
		return r;
	}

	USE(region,
		SLABFREE(region->phys););
	SLABFREE(region);

	return OK;
}

/*===========================================================================*
 *				yielded_block_cmp			     *
 *===========================================================================*/
int yielded_block_cmp(struct block_id *id1, struct block_id *id2)
{
	if(id1->owner < id2->owner)
		return -1;
	if(id1->owner > id2->owner)
		return 1;
	return cmp64(id1->id, id2->id);
}

/*===========================================================================*
 *				free_yielded_proc			     *
 *===========================================================================*/
static vir_bytes free_yielded_proc(struct vmproc *vmp)
{
	vir_bytes total = 0;
	int h;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Free associated regions. */
	for(h = 0; h < YIELD_HASHSIZE && vmp->vm_yielded > 0; h++) {
		yielded_t *yb;
		yielded_iter iter;
		yielded_avl *avl = &vm_yielded_blocks[h];
		yielded_start_iter_least(avl, &iter);
		while((yb = yielded_get_iter(&iter))) {
			yielded_t *next_yb;
			SLABSANE(yb);
			yielded_incr_iter(&iter);
			if(yb->id.owner != vmp->vm_endpoint)
				continue;
			next_yb = yielded_get_iter(&iter);
			total += freeyieldednode(yb, 1);
			/* the above removal invalidated our iter; restart it
			 * for the node we want to start at.
			 */
			if(!next_yb) break;
			yielded_start_iter(avl, &iter, next_yb->id, AVL_EQUAL);
			assert(yielded_get_iter(&iter) == next_yb);
		}
	}

	return total;
}

static phys_bytes freeyieldednode(yielded_t *node, int freemem)
{
	yielded_t *older, *younger, *removed;
	yielded_avl *avl;
	int p;

	SLABSANE(node);

	LRUCHECK;

	/* Update LRU. */

	younger = node->younger;
	older = node->older;

	if(younger) {
		SLABSANE(younger);
		assert(younger->older == node);
		USE(younger, younger->older = node->older;);
	} else {
		assert(node == lru_youngest);
		lru_youngest = node->older;
	}

	if(older) {
		SLABSANE(older);
		assert(older->younger == node);
		USE(older, older->younger = node->younger;);
	} else {
		assert(node == lru_oldest);
		lru_oldest = node->younger;
	}

	LRUCHECK;

	/* Update AVL. */

	if(vm_isokendpt(node->id.owner, &p) != OK)
		panic("out of date owner of yielded block %d", node->id.owner);
	avl = get_yielded_avl(node->id);
	removed = yielded_remove(avl, node->id);
	assert(removed == node);
	assert(vmproc[p].vm_yielded > 0);
	vmproc[p].vm_yielded--;

	/* Free associated memory if requested. */

	if(freemem) {
		free_mem(ABS2CLICK(node->physaddr), node->pages);
	}

	/* Free node. */
	SLABFREE(node);

	return VM_PAGE_SIZE;
}

/*========================================================================*
 *				free_yielded				  *
 *========================================================================*/
vir_bytes free_yielded(vir_bytes max_bytes)
{

/* PRIVATE yielded_t *lru_youngest = NULL, *lru_oldest = NULL; */
	vir_bytes freed = 0;
	int blocks = 0;

	while(freed < max_bytes && lru_oldest) {
		SLABSANE(lru_oldest);
		freed += freeyieldednode(lru_oldest, 1);
		blocks++;
	}

	return freed;
}

/*========================================================================*
 *				map_free_proc				  *
 *========================================================================*/
int map_free_proc(vmp)
struct vmproc *vmp;
{
	struct vir_region *r;

	while((r = region_search_root(&vmp->vm_regions_avl))) {
		SANITYCHECK(SCL_DETAIL);
#if SANITYCHECKS
		nocheck++;
#endif
		region_remove(&vmp->vm_regions_avl, r->vaddr); /* For sanity checks. */
		map_free(r);
#if SANITYCHECKS
		nocheck--;
#endif
		SANITYCHECK(SCL_DETAIL);
	}

	region_init(&vmp->vm_regions_avl);

	/* Free associated yielded blocks. */
	free_yielded_proc(vmp);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}

/*===========================================================================*
 *				map_lookup				     *
 *===========================================================================*/
struct vir_region *map_lookup(vmp, offset, physr)
struct vmproc *vmp;
vir_bytes offset;
struct phys_region **physr;
{
	struct vir_region *r;

	SANITYCHECK(SCL_FUNCTIONS);

#if SANITYCHECKS
	if(!region_search_root(&vmp->vm_regions_avl))
		panic("process has no regions: %d", vmp->vm_endpoint);
#endif

	if((r = region_search(&vmp->vm_regions_avl, offset, AVL_LESS_EQUAL))) {
		vir_bytes ph;
		if(offset >= r->vaddr && offset < r->vaddr + r->length) {
			ph = offset - r->vaddr;
			if(physr) {
				*physr = physr_search(r->phys, ph, AVL_EQUAL);
				assert((*physr)->offset == ph);
			}
			return r;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return NULL;
}

static u32_t vrallocflags(u32_t flags)
{
	u32_t allocflags = 0;

	if(flags & VR_PHYS64K)
		allocflags |= PAF_ALIGN64K;
	if(flags & VR_LOWER16MB)
		allocflags |= PAF_LOWER16MB;
	if(flags & VR_LOWER1MB)
		allocflags |= PAF_LOWER1MB;
	if(flags & VR_CONTIG)
		allocflags |= PAF_CONTIG;

	return allocflags;
}

/*===========================================================================*
 *				map_new_physblock			     *
 *===========================================================================*/
static int map_new_physblock(vmp, region, start_offset, length,
	what_mem, allocflags, written)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes start_offset;
vir_bytes length;
phys_bytes what_mem;
u32_t allocflags;
int written;
{
	struct memlist *memlist, *ml;
	int r;
	vir_bytes mapped = 0;
	vir_bytes offset = start_offset;

	SANITYCHECK(SCL_FUNCTIONS);

	assert(!(length % VM_PAGE_SIZE));

	if((region->flags & VR_CONTIG) &&
		(start_offset > 0 || length < region->length)) {
		printf("VM: region length 0x%lx, offset 0x%lx length 0x%lx\n",
			region->length, start_offset, length);
		map_printmap(vmp);
		printf("VM: map_new_physblock: non-full contig allocation requested\n");
		return EFAULT;
	}

	/* Memory for new physical block. */
	allocflags |= vrallocflags(region->flags);

	if(allocflags & PAF_CONTIG) {
		assert(what_mem == MAP_NONE);
		if((what_mem = alloc_mem(length/VM_PAGE_SIZE, allocflags)) == NO_MEM) {
			return ENOMEM;
		}
		what_mem = CLICK2ABS(what_mem);
		allocflags &= ~PAF_CONTIG;
		assert(what_mem != MAP_NONE);
	}

	if(!(memlist = alloc_mem_in_list(length, allocflags, what_mem))) {
		printf("map_new_physblock: couldn't allocate\n");
		return ENOMEM;
	}

	r = OK;

	for(ml = memlist; ml; ml = ml->next) {
		struct phys_region *newphysr = NULL;
		struct phys_block *newpb = NULL;

		/* Allocate things necessary for this chunk of memory. */
		if(!(newpb = pb_new(ml->phys)) ||
		   !(newphysr = pb_reference(newpb, offset, region))) {
			printf("map_new_physblock: no memory for the ph slabs\n");
			assert(!newphysr);
			if(newpb) SLABFREE(newpb);
			r = ENOMEM;
			break;
		}

		/* Update pagetable. */
		if(map_ph_writept(vmp, region, newphysr) != OK) {
			printf("map_new_physblock: map_ph_writept failed\n");
			r = ENOMEM;
			break;
		}

		offset += VM_PAGE_SIZE;
		mapped += VM_PAGE_SIZE;
	}

	if(r != OK) {
		offset = start_offset;
		/* Things did not go well. Undo everything. */
		for(ml = memlist; ml; ml = ml->next) {
			struct phys_region *physr;
			if((physr = physr_search(region->phys, offset,
				AVL_EQUAL))) {
				assert(physr->ph->refcount == 1);
				pb_unreferenced(region, physr, 1);
				SLABFREE(physr);
			}
			offset += VM_PAGE_SIZE;
		}
	} else assert(mapped == length);

	/* Always clean up the memlist itself; even if everything
	 * worked, we're not using the memlist nodes any more. And
	 * the memory they reference is either freed above or in use.
	 */
	free_mem_list(memlist, 0);

	SANITYCHECK(SCL_FUNCTIONS);

	return r;
}

/*===========================================================================*
 *				map_clone_ph_block			     *
 *===========================================================================*/
static struct phys_region *map_clone_ph_block(vmp, region, ph, iter)
struct vmproc *vmp;
struct vir_region *region;
struct phys_region *ph;
physr_iter *iter;
{
	vir_bytes offset;
	u32_t allocflags;
	phys_bytes physaddr;
	struct phys_region *newpr;
	int region_has_single_block;
	int written = 0;
#if SANITYCHECKS
	written = ph->written;
#endif
	SANITYCHECK(SCL_FUNCTIONS);

	/* Warning: this function will free the passed
	 * phys_region *ph and replace it (in the same offset)
	 * with another! So both the pointer to it
	 * and any iterators over the phys_regions in the vir_region
	 * will be invalid on successful return. (Iterators over
	 * the vir_region could be invalid on unsuccessful return too.)
	 */

	/* This is only to be done if there is more than one copy. */
	assert(ph->ph->refcount > 1);

	/* This function takes a physical block, copies its contents
	 * into newly allocated memory, and replaces the single physical
	 * block by one or more physical blocks with refcount 1 with the
	 * same contents as the original. In other words, a fragmentable
	 * version of map_copy_ph_block().
	 */

	/* Remember where and how much. */
	offset = ph->offset;
	physaddr = ph->ph->phys;

	/* Now unlink the original physical block so we can replace
	 * it with new ones.
	 */

	SLABSANE(ph);
	SLABSANE(ph->ph);
	assert(ph->ph->refcount > 1);
	pb_unreferenced(region, ph, 1);
	assert(ph->ph->refcount >= 1);
	SLABFREE(ph);

	SANITYCHECK(SCL_DETAIL);

	/* Put new free memory in. */
	allocflags = vrallocflags(region->flags);
	region_has_single_block = (offset == 0 && region->length == VM_PAGE_SIZE);
	assert(region_has_single_block || !(allocflags & PAF_CONTIG));
	assert(!(allocflags & PAF_CLEAR));

	if(map_new_physblock(vmp, region, offset, VM_PAGE_SIZE,
		MAP_NONE, allocflags, written) != OK) {
		/* XXX original range now gone. */
		printf("VM: map_clone_ph_block: map_new_physblock failed.\n");
		return NULL;
	}

	/* Copy the block to the new memory.
	 * Can only fail if map_new_physblock didn't do what we asked.
	 */
	if(copy_abs2region(physaddr, region, offset, VM_PAGE_SIZE) != OK)
		panic("copy_abs2region failed, no good reason for that");

	newpr = physr_search(region->phys, offset, AVL_EQUAL);
	assert(newpr);
	assert(newpr->offset == offset);

	if(iter) {
		physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
		assert(physr_get_iter(iter) == newpr);
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return newpr;
}
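/* Editor's note (not part of the original source): because
 * map_clone_ph_block() frees the phys_region it is handed and re-creates
 * one at the same offset, a caller that keeps iterating must continue from
 * the returned pointer and the re-seated iterator, as map_handle_memory()
 * does:
 */
#if 0
	/* Sketch: 'physr' and 'iter' are invalid after this call; the
	 * returned phys_region and the re-seated iterator replace them. */
	if(!(physr = map_clone_ph_block(vmp, region, physr, &iter)))
		return ENOMEM;
#endif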
/*===========================================================================*
 *				map_pf					     *
 *===========================================================================*/
int map_pf(vmp, region, offset, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset;
int write;
{
	vir_bytes virpage;
	struct phys_region *ph;
	int r = OK;

	assert(offset >= 0);
	assert(offset < region->length);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));

	virpage = offset - offset % VM_PAGE_SIZE;

	SANITYCHECK(SCL_FUNCTIONS);

	if((ph = physr_search(region->phys, offset, AVL_LESS_EQUAL)) &&
	   (ph->offset <= offset && offset < ph->offset + VM_PAGE_SIZE)) {
		/* Pagefault in existing block. Do copy-on-write. */
		assert(write);
		assert(region->flags & VR_WRITABLE);
		assert(ph->ph->refcount > 0);

		if(WRITABLE(region, ph->ph)) {
			r = map_ph_writept(vmp, region, ph);
			if(r != OK)
				printf("map_ph_writept failed\n");
		} else {
			if(ph->ph->refcount > 0
				&& ph->ph->share_flag != PBSH_COW) {
				printf("VM: write RO mapped pages.\n");
				return EFAULT;
			} else {
				if(!map_clone_ph_block(vmp, region, ph, NULL))
					r = ENOMEM;
			}
		}
	} else {
		/* Pagefault in non-existing block. Map in new block. */
		if(map_new_physblock(vmp, region, virpage,
			VM_PAGE_SIZE, MAP_NONE, PAF_CLEAR, 0) != OK) {
			printf("map_new_physblock failed\n");
			r = ENOMEM;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(r != OK) {
		printf("VM: map_pf: failed (%d)\n", r);
		return r;
	}

#if SANITYCHECKS
	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+virpage,
		VM_PAGE_SIZE, write)) {
		panic("map_pf: pt_checkrange failed: %d", r);
	}
#endif

	return r;
}

/*===========================================================================*
 *				map_pin_memory				     *
 *===========================================================================*/
int map_pin_memory(struct vmproc *vmp)
{
	struct vir_region *vr;
	int r;
	region_iter iter;
	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	/* Scan all memory regions. */
	while((vr = region_get_iter(&iter))) {
		/* Make sure region is mapped to physical memory and writable.*/
		r = map_handle_memory(vmp, vr, 0, vr->length, 1);
		if(r != OK) {
			panic("map_pin_memory: map_handle_memory failed: %d", r);
		}
		region_incr_iter(&iter);
	}
	return OK;
}

/*===========================================================================*
 *				map_handle_memory			     *
 *===========================================================================*/
int map_handle_memory(vmp, region, offset, length, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset, length;
int write;
{
	struct phys_region *physr, *nextphysr;
	int changes = 0;
	physr_iter iter;
	u32_t allocflags = 0;

	if(!(region->flags & VR_UNINITIALIZED)) {
		allocflags = PAF_CLEAR;
	}

#define FREE_RANGE_HERE(er1, er2) {					\
	struct phys_region *r1 = (er1), *r2 = (er2);			\
	vir_bytes start = offset, end = offset + length;		\
	if(r1) {							\
		start = MAX(start, r1->offset + VM_PAGE_SIZE); }	\
	if(r2) {							\
		end = MIN(end, r2->offset); }				\
	if(start < end) {						\
		SANITYCHECK(SCL_DETAIL);				\
		if(map_new_physblock(vmp, region, start,		\
			end-start, MAP_NONE, allocflags, 0) != OK) {	\
			SANITYCHECK(SCL_DETAIL);			\
			return ENOMEM;					\
		}							\
		changes++;						\
	} }

	SANITYCHECK(SCL_FUNCTIONS);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));
	assert(!(offset % VM_PAGE_SIZE));
	assert(!(length % VM_PAGE_SIZE));
	assert(!write || (region->flags & VR_WRITABLE));

	physr_start_iter(region->phys, &iter, offset, AVL_LESS_EQUAL);
	physr = physr_get_iter(&iter);

	if(!physr) {
		physr_start_iter(region->phys, &iter, offset, AVL_GREATER_EQUAL);
		physr = physr_get_iter(&iter);
	}

	FREE_RANGE_HERE(NULL, physr);

	if(physr) {
		physr = reset_physr_iter(region, &iter, physr->offset);
		if(physr->offset + VM_PAGE_SIZE <= offset) {
			physr_incr_iter(&iter);
			physr = physr_get_iter(&iter);

			FREE_RANGE_HERE(NULL, physr);
			if(physr) {
				physr = reset_physr_iter(region, &iter,
					physr->offset);
			}
		}
	}

	while(physr) {
		int r;

		SANITYCHECK(SCL_DETAIL);

		if(write) {
			assert(physr->ph->refcount > 0);
			if(!WRITABLE(region, physr->ph)) {
				if(!(physr = map_clone_ph_block(vmp, region,
					physr, &iter))) {
					printf("VM: map_handle_memory: no copy\n");
					return ENOMEM;
				}
				changes++;
			} else {
				SANITYCHECK(SCL_DETAIL);
				if((r=map_ph_writept(vmp, region, physr)) != OK) {
					printf("VM: map_ph_writept failed\n");
					return r;
				}
				changes++;
				SANITYCHECK(SCL_DETAIL);
			}
		}

		SANITYCHECK(SCL_DETAIL);
		physr_incr_iter(&iter);
		nextphysr = physr_get_iter(&iter);
		FREE_RANGE_HERE(physr, nextphysr);
		SANITYCHECK(SCL_DETAIL);
		if(nextphysr) {
			if(nextphysr->offset >= offset + length)
				break;
			nextphysr = reset_physr_iter(region, &iter,
				nextphysr->offset);
		}
		physr = nextphysr;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(changes < 1) {
#if VERBOSE
		printf("region start at 0x%lx offset 0x%lx len 0x%lx write %d\n",
			region->vaddr, offset, length, write);
		printf("no changes in map_handle_memory\n");
#endif
		return EFAULT;
	}

#if SANITYCHECKS
	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset, length, write)) {
		printf("handle mem 0x%lx-0x%lx failed\n",
			region->vaddr+offset,region->vaddr+offset+length);
		map_printregion(vmp, region);
		panic("checkrange failed");
	}
#endif

	return OK;
}

#if SANITYCHECKS
static int count_phys_regions(struct vir_region *vr)
{
	int n = 0;
	struct phys_region *ph;
	physr_iter iter;
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		n++;
		physr_incr_iter(&iter);
	}
	return n;
}
#endif

/*===========================================================================*
 *				map_copy_region				     *
 *===========================================================================*/
static struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region *vr)
{
	/* map_copy_region creates a complete copy of the vir_region
	 * data structure, linking in the same phys_blocks directly,
	 * but all in limbo, i.e., the caller has to link the vir_region
	 * to a process. Therefore it doesn't increase the refcount in
	 * the phys_block; the caller has to do this once it's linked.
	 * The reason for this is to keep the sanity checks working
	 * within this function.
	 */
	struct vir_region *newvr;
	struct phys_region *ph;
	physr_iter iter;
#if SANITYCHECKS
	int cr;
	cr = count_phys_regions(vr);
#endif

	if(!(newvr = region_new(vr->parent, vr->vaddr, vr->length, vr->flags)))
		return NULL;

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		struct phys_region *newph = pb_reference(ph->ph, ph->offset, newvr);

		if(!newph) { map_free(newvr); return NULL; }

#if SANITYCHECKS
		USE(newph, newph->written = 0;);
		assert(count_phys_regions(vr) == cr);
#endif
		physr_incr_iter(&iter);
	}

#if SANITYCHECKS
	assert(count_phys_regions(vr) == count_phys_regions(newvr));
#endif

	return newvr;
}
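/* Editor's note (not part of the original source): both callers in this file
 * (map_proc_copy_from() and map_remap()) follow the same pattern after a
 * successful copy: re-parent the new region, insert it into the destination
 * AVL, then fix up sharing on the phys_blocks:
 */
#if 0
	if(!(newvr = map_copy_region(dst, vr)))
		return ENOMEM;
	USE(newvr, newvr->parent = dst;);
	region_insert(&dst->vm_regions_avl, newvr);
	/* ...then mark each shared phys_block COW, or clone it. */
#endif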
/*===========================================================================*
 *				copy_abs2region				     *
 *===========================================================================*/
int copy_abs2region(phys_bytes abs, struct vir_region *destregion,
	phys_bytes offset, phys_bytes len)
{
	assert(destregion);
	assert(destregion->phys);
	while(len > 0) {
		phys_bytes sublen, suboffset;
		struct phys_region *ph;
		assert(destregion);
		assert(destregion->phys);
		if(!(ph = physr_search(destregion->phys, offset, AVL_LESS_EQUAL))) {
			printf("VM: copy_abs2region: no phys region found (1).\n");
			return EFAULT;
		}
		assert(ph->offset <= offset);
		if(ph->offset+VM_PAGE_SIZE <= offset) {
			printf("VM: copy_abs2region: no phys region found (2).\n");
			return EFAULT;
		}
		suboffset = offset - ph->offset;
		assert(suboffset < VM_PAGE_SIZE);
		sublen = len;
		if(sublen > VM_PAGE_SIZE - suboffset)
			sublen = VM_PAGE_SIZE - suboffset;
		assert(suboffset + sublen <= VM_PAGE_SIZE);
		if(ph->ph->refcount != 1) {
			printf("VM: copy_abs2region: refcount not 1.\n");
			return EFAULT;
		}

		if(sys_abscopy(abs, ph->ph->phys + suboffset, sublen) != OK) {
			printf("VM: copy_abs2region: abscopy failed.\n");
			return EFAULT;
		}
		abs += sublen;
		offset += sublen;
		len -= sublen;
	}

	return OK;
}

/*=========================================================================*
 *				map_writept				   *
 *=========================================================================*/
int map_writept(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *ph;
	int r;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		physr_iter ph_iter;
		physr_start_iter_least(vr->phys, &ph_iter);

		while((ph = physr_get_iter(&ph_iter))) {
			physr_incr_iter(&ph_iter);

			/* If this phys block is shared as SMAP, then do
			 * not update the page table. */
			if(ph->ph->refcount > 1
				&& ph->ph->share_flag == PBSH_SMAP) {
				continue;
			}

			if((r=map_ph_writept(vmp, vr, ph)) != OK) {
				printf("VM: map_writept: failed\n");
				return r;
			}
		}
		region_incr_iter(&v_iter);
	}

	return OK;
}

/*========================================================================*
 *				map_proc_copy				  *
 *========================================================================*/
int map_proc_copy(dst, src)
struct vmproc *dst;
struct vmproc *src;
{
/* Copy all the memory regions from the src process to the dst process. */
	region_init(&dst->vm_regions_avl);

	return map_proc_copy_from(dst, src, NULL);
}

/*========================================================================*
 *				map_proc_copy_from			  *
 *========================================================================*/
int map_proc_copy_from(dst, src, start_src_vr)
struct vmproc *dst;
struct vmproc *src;
struct vir_region *start_src_vr;
{
	struct vir_region *vr;
	region_iter v_iter;

	if(!start_src_vr)
		start_src_vr = region_search_least(&src->vm_regions_avl);

	assert(start_src_vr);
	assert(start_src_vr->parent == src);
	region_start_iter(&src->vm_regions_avl, &v_iter,
		start_src_vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter) == start_src_vr);

	/* Copy source regions after the destination's last region (if any). */

	SANITYCHECK(SCL_FUNCTIONS);

	while((vr = region_get_iter(&v_iter))) {
		physr_iter iter_orig, iter_new;
		struct vir_region *newvr;
		struct phys_region *orig_ph, *new_ph;
		if(!(newvr = map_copy_region(dst, vr))) {
			map_free_proc(dst);
			return ENOMEM;
		}
		USE(newvr, newvr->parent = dst;);
		region_insert(&dst->vm_regions_avl, newvr);
		physr_start_iter_least(vr->phys, &iter_orig);
		physr_start_iter_least(newvr->phys, &iter_new);
		while((orig_ph = physr_get_iter(&iter_orig))) {
			struct phys_block *pb;
			new_ph = physr_get_iter(&iter_new);
			/* Check that the two physregions are both nonnull,
			 * are different, and share the same physblock.
			 */
			assert(new_ph);
			assert(orig_ph);
			assert(orig_ph != new_ph);
			pb = orig_ph->ph;
			assert(orig_ph->ph == new_ph->ph);

			/* If the phys block has been shared as SMAP,
			 * do the regular copy. */
			if(pb->refcount > 2 && pb->share_flag == PBSH_SMAP) {
				map_clone_ph_block(dst, newvr, new_ph,
					&iter_new);
			} else {
				USE(pb, pb->share_flag = PBSH_COW;);
			}

			/* Get next new physregion */
			physr_incr_iter(&iter_orig);
			physr_incr_iter(&iter_new);
		}
		assert(!physr_get_iter(&iter_new));
		region_incr_iter(&v_iter);
	}

	map_writept(src);
	map_writept(dst);

	SANITYCHECK(SCL_FUNCTIONS);
	return OK;
}

int map_region_extend_upto_v(struct vmproc *vmp, vir_bytes v)
{
	vir_bytes offset = v, end;
	struct vir_region *vr, *nextvr;
	int r = OK;

	if(!(vr = region_search(&vmp->vm_regions_avl, offset, AVL_LESS))) {
		printf("VM: nothing to extend\n");
		return ENOMEM;
	}

	if(!(vr->flags & VR_ANON)) {
		printf("VM: memory range to extend not anonymous\n");
		return ENOMEM;
	}

	assert(vr->vaddr <= offset);
	if((nextvr = getnextvr(vr))) {
		assert(offset <= nextvr->vaddr);
	}

	end = vr->vaddr + vr->length;

	offset = roundup(offset, VM_PAGE_SIZE);

	if(end < offset)
		r = map_region_extend(vmp, vr, offset - end);

	return r;
}

/*========================================================================*
 *				map_region_extend			  *
 *========================================================================*/
int map_region_extend(struct vmproc *vmp, struct vir_region *vr,
	vir_bytes delta)
{
	vir_bytes end;
	struct vir_region *nextvr;

	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));
	if(vr->flags & VR_CONTIG) {
		printf("VM: can't grow contig region\n");
		return EFAULT;
	}

	if(!delta) return OK;
	end = vr->vaddr + vr->length;
	assert(end >= vr->vaddr);

	if(end + delta <= end) {
		printf("VM: strange delta 0x%lx\n", delta);
		return ENOMEM;
	}

	nextvr = getnextvr(vr);

	if(!nextvr || end + delta <= nextvr->vaddr) {
		USE(vr, vr->length += delta;);
		return OK;
	}

	return ENOMEM;
}

/*========================================================================*
 *				map_region_shrink			  *
 *========================================================================*/
int map_region_shrink(struct vir_region *vr, vir_bytes delta)
{
	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));

#if 0
	printf("VM: ignoring region shrink\n");
#endif

	return OK;
}

struct vir_region *map_region_lookup_tag(vmp, tag)
struct vmproc *vmp;
u32_t tag;
{
	struct vir_region *vr;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		if(vr->tag == tag)
			return vr;
		region_incr_iter(&v_iter);
	}

	return NULL;
}

void map_region_set_tag(struct vir_region *vr, u32_t tag)
{
	USE(vr, vr->tag = tag;);
}

u32_t map_region_get_tag(struct vir_region *vr)
{
	return vr->tag;
}

/*========================================================================*
 *				map_unmap_region			  *
 *========================================================================*/
int map_unmap_region(struct vmproc *vmp, struct vir_region *r,
	vir_bytes offset, vir_bytes len)
{
/* Unmap the 'len' bytes at 'offset' within the region, unreferencing any
 * memory they used to reference. If the range sits at the start or end of
 * the region, the region itself shrinks accordingly.
 */
	vir_bytes regionstart;

	SANITYCHECK(SCL_FUNCTIONS);

	if(offset+len > r->length || (len % VM_PAGE_SIZE)) {
		printf("VM: bogus length 0x%lx\n", len);
		return EINVAL;
	}

	if(!(r->flags & (VR_ANON|VR_DIRECT))) {
		printf("VM: only unmap anonymous or direct memory\n");
		return EINVAL;
	}

	regionstart = r->vaddr + offset;

	/* unreference its memory */
	map_subfree(r, offset, len);

	/* if unmap was at start/end of this region, it actually shrinks */
	if(offset == 0) {
		struct phys_region *pr;
		physr_iter iter;

		region_remove(&vmp->vm_regions_avl, r->vaddr);

		USE(r,
		r->vaddr += len;
		r->length -= len;);

		region_insert(&vmp->vm_regions_avl, r);

		/* vaddr has increased; to make all the phys_regions
		 * point to the same addresses, make them shrink by the
		 * same amount.
		 */
		physr_init_iter(&iter);
		physr_start_iter(r->phys, &iter, offset, AVL_GREATER_EQUAL);

		while((pr = physr_get_iter(&iter))) {
			assert(pr->offset >= offset);
			USE(pr, pr->offset -= len;);
			physr_incr_iter(&iter);
		}
	} else if(offset + len == r->length) {
		assert(len <= r->length);
		r->length -= len;
	}

	if(r->length == 0) {
		/* Whole region disappears. Unlink and free it. */
		region_remove(&vmp->vm_regions_avl, r->vaddr);
		map_free(r);
	}

	SANITYCHECK(SCL_DETAIL);

	if(pt_writemap(vmp, &vmp->vm_pt, regionstart,
	  MAP_NONE, len, 0, WMF_OVERWRITE) != OK) {
		printf("VM: map_unmap_region: pt_writemap failed\n");
		return ENOMEM;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
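/* Editor's sketch (not part of the original source): unmapping the first two
 * pages of a region. Afterwards r->vaddr has moved up by 2*VM_PAGE_SIZE and
 * every remaining phys_region offset has been rebased to match.
 */
#if 0
	if(map_unmap_region(vmp, r, 0, 2*VM_PAGE_SIZE) != OK)
		return EINVAL;
#endif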
1752 /*========================================================================*
1753 * map_remap *
1754 *========================================================================*/
1755 int map_remap(struct vmproc *dvmp, vir_bytes da, size_t size,
1756 struct vir_region *region, vir_bytes *r, int readonly)
1758 struct vir_region *vr;
1759 struct phys_region *ph;
1760 vir_bytes startv, dst_addr;
1761 physr_iter iter;
1763 SANITYCHECK(SCL_FUNCTIONS);
1765 assert(region->flags & VR_SHARED);
1767 /* da is handled differently */
1768 if (!da)
1769 dst_addr = 0;
1770 else
1771 dst_addr = da;
1773 /* round up to page size */
1774 assert(!(size % VM_PAGE_SIZE));
1775 startv = region_find_slot(dvmp, dst_addr, VM_DATATOP, size);
1776 if (startv == SLOT_FAIL) {
1777 return ENOMEM;
1779 /* when the user specifies the address, we cannot change it */
1780 if (da && (startv != dst_addr))
1781 return EINVAL;
1783 vr = map_copy_region(dvmp, region);
1784 if(!vr)
1785 return ENOMEM;
1787 USE(vr,
1788 vr->vaddr = startv;
1789 vr->length = size;
1790 vr->flags = region->flags;
1791 vr->tag = VRT_NONE;
1792 vr->parent = dvmp;
1793 if(readonly) {
1794 vr->flags &= ~VR_WRITABLE;
1797 assert(vr->flags & VR_SHARED);
1799 region_insert(&dvmp->vm_regions_avl, vr);
1801 physr_start_iter_least(vr->phys, &iter);
1802 while((ph = physr_get_iter(&iter))) {
1803 if(map_ph_writept(dvmp, vr, ph) != OK) {
1804 panic("map_remap: map_ph_writept failed");
1806 physr_incr_iter(&iter);
1809 *r = startv;
1811 SANITYCHECK(SCL_FUNCTIONS);
1813 return OK;
1816 /*========================================================================*
1817 * map_get_phys *
1818 *========================================================================*/
1819 int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
1821 struct vir_region *vr;
1822 struct phys_region *ph;
1823 physr_iter iter;
1825 if (!(vr = map_lookup(vmp, addr, NULL)) ||
1826 (vr->vaddr != addr))
1827 return EINVAL;
1829 if (!(vr->flags & VR_SHARED))
1830 return EINVAL;
1832 physr_start_iter_least(vr->phys, &iter);
1833 ph = physr_get_iter(&iter);
1835 assert(ph);
1836 assert(ph->ph);
1837 if (r)
1838 *r = ph->ph->phys;
1840 return OK;
1843 /*========================================================================*
1844 * map_get_ref *
1845 *========================================================================*/
1846 int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
1848 struct vir_region *vr;
1849 struct phys_region *ph;
1850 physr_iter iter;
1852 if (!(vr = map_lookup(vmp, addr, NULL)) ||
1853 (vr->vaddr != addr))
1854 return EINVAL;
1856 if (!(vr->flags & VR_SHARED))
1857 return EINVAL;
1859 physr_start_iter_least(vr->phys, &iter);
1860 ph = physr_get_iter(&iter);
1862 assert(ph);
1863 assert(ph->ph);
1864 if (cnt)
1865 *cnt = ph->ph->refcount;
1867 return OK;
1870 /*========================================================================*
1871 * get_stats_info *
1872 *========================================================================*/
1873 void get_stats_info(struct vm_stats_info *vsi)
1875 yielded_t *yb;
1877 vsi->vsi_cached = 0L;
1879 for(yb = lru_youngest; yb; yb = yb->older)
1880 vsi->vsi_cached++;
1883 void get_usage_info_kernel(struct vm_usage_info *vui)
1885 memset(vui, 0, sizeof(*vui));
1886 vui->vui_total = kernel_boot_info.kernel_allocated_bytes;
1889 static void get_usage_info_vm(struct vm_usage_info *vui)
1891 memset(vui, 0, sizeof(*vui));
1892 vui->vui_total = kernel_boot_info.vm_allocated_bytes +
1893 get_vm_self_pages() * VM_PAGE_SIZE;
1896 /*========================================================================*
1897 * get_usage_info *
1898 *========================================================================*/
1899 void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
1901 struct vir_region *vr;
1902 physr_iter iter;
1903 struct phys_region *ph;
1904 region_iter v_iter;
1905 region_start_iter_least(&vmp->vm_regions_avl, &v_iter);
1907 memset(vui, 0, sizeof(*vui));
1909 if(vmp->vm_endpoint == VM_PROC_NR) {
1910 get_usage_info_vm(vui);
1911 return;
1914 if(vmp->vm_endpoint < 0) {
1915 get_usage_info_kernel(vui);
1916 return;
1919 while((vr = region_get_iter(&v_iter))) {
1920 physr_start_iter_least(vr->phys, &iter);
1921 while((ph = physr_get_iter(&iter))) {
1922 /* All present pages are counted towards the total. */
1923 vui->vui_total += VM_PAGE_SIZE;
1925 if (ph->ph->refcount > 1) {
1926 /* Any page with a refcount > 1 is common. */
1927 vui->vui_common += VM_PAGE_SIZE;
1929 /* Any common, non-COW page is shared. */
1930 if (vr->flags & VR_SHARED ||
1931 ph->ph->share_flag == PBSH_SMAP)
1932 vui->vui_shared += VM_PAGE_SIZE;
1934 physr_incr_iter(&iter);
1936 region_incr_iter(&v_iter);
1940 /*===========================================================================*
1941 * get_region_info *
1942 *===========================================================================*/
1943 int get_region_info(struct vmproc *vmp, struct vm_region_info *vri,
1944 int max, vir_bytes *nextp)
1946 struct vir_region *vr;
1947 vir_bytes next;
1948 int count;
1949 region_iter v_iter;
1951 next = *nextp;
1953 if (!max) return 0;
1955 region_start_iter(&vmp->vm_regions_avl, &v_iter, next, AVL_GREATER_EQUAL);
1956 if(!(vr = region_get_iter(&v_iter))) return 0;
1958 for(count = 0; (vr = region_get_iter(&v_iter)) && count < max; count++, vri++) {
1959 struct phys_region *ph1, *ph2;
1961 /* Report part of the region that's actually in use. */
1963 /* Get first and last phys_regions, if any */
1964 ph1 = physr_search_least(vr->phys);
1965 ph2 = physr_search_greatest(vr->phys);
1966 if(!ph1 || !ph2) { assert(!ph1 && !ph2); continue; }
1968 /* Report start+length of region starting from lowest use. */
1969 vri->vri_addr = vr->vaddr + ph1->offset;
1970 vri->vri_prot = 0;
1971 vri->vri_length = ph2->offset + VM_PAGE_SIZE - ph1->offset;
1973 /* "AND" the provided protection with per-page protection. */
1974 if (!(vr->flags & VR_WRITABLE))
1975 vri->vri_prot &= ~PROT_WRITE;
1977 vri->vri_flags = (vr->flags & VR_SHARED) ? MAP_IPC_SHARED : 0;
1979 next = vr->vaddr + vr->length;
1980 region_incr_iter(&v_iter);
1983 *nextp = next;
1984 return count;
1987 /*========================================================================*
1988 * regionprintstats *
1989 *========================================================================*/
1990 void printregionstats(struct vmproc *vmp)
1992 struct vir_region *vr;
1993 struct phys_region *pr;
1994 physr_iter iter;
1995 vir_bytes used = 0, weighted = 0;
1996 region_iter v_iter;
1997 region_start_iter_least(&vmp->vm_regions_avl, &v_iter);
1999 while((vr = region_get_iter(&v_iter))) {
2000 region_incr_iter(&v_iter);
2001 if(vr->flags & VR_DIRECT)
2002 continue;
2003 physr_start_iter_least(vr->phys, &iter);
2004 while((pr = physr_get_iter(&iter))) {
2005 physr_incr_iter(&iter);
2006 used += VM_PAGE_SIZE;
2007 weighted += VM_PAGE_SIZE / pr->ph->refcount;
2011 printf("%6lukB %6lukB\n", used/1024, weighted/1024);
2013 return;
2016 /*===========================================================================*
2017 * do_map_memory *
2018 *===========================================================================*/
2019 static int do_map_memory(struct vmproc *vms, struct vmproc *vmd,
2020 struct vir_region *vrs, struct vir_region *vrd,
2021 vir_bytes offset_s, vir_bytes offset_d,
2022 vir_bytes length, int flag)
2024 struct phys_region *prs;
2025 struct phys_region *newphysr;
2026 struct phys_block *pb;
2027 physr_iter iter;
2028 u32_t pt_flag = PTF_PRESENT | PTF_USER;
2029 vir_bytes end;
2031 /* Search for the first phys region in the source process. */
2032 physr_start_iter(vrs->phys, &iter, offset_s, AVL_EQUAL);
2033 prs = physr_get_iter(&iter);
2034 if(!prs)
2035 panic("do_map_memory: no aligned phys region: %d", 0);
2037 /* flag: 0 -> read-only
2038 * 1 -> writable
2039 * -1 -> share as COW, so read-only
2041 if(flag > 0)
2042 pt_flag |= PTF_WRITE;
2043 else
2044 pt_flag |= PTF_READ;
2046 /* Map phys blocks in the source process to the destination process. */
2047 end = offset_d + length;
2048 while((prs = physr_get_iter(&iter)) && offset_d < end) {
2049 /* If a SMAP share was requested but the phys block has already
2050 * been shared as COW, copy the block for the source phys region
2051 * first.
2053 pb = prs->ph;
2054 if(flag >= 0 && pb->refcount > 1
2055 && pb->share_flag == PBSH_COW) {
2056 if(!(prs = map_clone_ph_block(vms, vrs, prs, &iter)))
2057 return ENOMEM;
2058 pb = prs->ph;
2061 /* Allocate a new phys region. */
2062 if(!(newphysr = pb_reference(pb, offset_d, vrd)))
2063 return ENOMEM;
2065 /* If a COW share was requested but the phys block has already
2066 * been shared as SMAP, give up on COW and copy the block for
2067 * the destination phys region now.
2069 if(flag < 0 && pb->refcount > 1
2070 && pb->share_flag == PBSH_SMAP) {
2071 if(!(newphysr = map_clone_ph_block(vmd, vrd,
2072 newphysr, NULL))) {
2073 return ENOMEM;
2076 else {
2077 /* See if this is a COW share or SMAP share. */
2078 if(flag < 0) { /* COW share */
2079 pb->share_flag = PBSH_COW;
2080 /* Update the page table for the src process. */
2081 pt_writemap(vms, &vms->vm_pt, offset_s + vrs->vaddr,
2082 pb->phys, VM_PAGE_SIZE,
2083 pt_flag, WMF_OVERWRITE);
2085 else { /* SMAP share */
2086 pb->share_flag = PBSH_SMAP;
2088 /* Update the page table for the destination process. */
2089 pt_writemap(vmd, &vmd->vm_pt, offset_d + vrd->vaddr,
2090 pb->phys, VM_PAGE_SIZE, pt_flag, WMF_OVERWRITE);
2093 physr_incr_iter(&iter);
2094 offset_d += VM_PAGE_SIZE;
2095 offset_s += VM_PAGE_SIZE;
2097 return OK;
2100 /*===========================================================================*
2101 * unmap_memory *
2102 *===========================================================================*/
2103 int unmap_memory(endpoint_t sour, endpoint_t dest,
2104 vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
2106 struct vmproc *vmd;
2107 struct vir_region *vrd;
2108 struct phys_region *pr;
2109 struct phys_block *pb;
2110 physr_iter iter;
2111 vir_bytes off, end;
2112 int p;
2114 /* Use information on the destination process to unmap. */
2115 if(vm_isokendpt(dest, &p) != OK)
2116 panic("unmap_memory: bad endpoint: %d", dest);
2117 vmd = &vmproc[p];
2119 vrd = map_lookup(vmd, virt_d, NULL);
2120 assert(vrd);
2122 /* Search for the first phys region in the destination process. */
2123 off = virt_d - vrd->vaddr;
2124 physr_start_iter(vrd->phys, &iter, off, AVL_EQUAL);
2125 pr = physr_get_iter(&iter);
2126 if(!pr)
2127 panic("unmap_memory: no aligned phys region: %d", 0);
2129 /* Copy the phys block now rather than doing COW. */
2130 end = off + length;
2131 while((pr = physr_get_iter(&iter)) && off < end) {
2132 pb = pr->ph;
2133 assert(pb->refcount > 1);
2134 assert(pb->share_flag == PBSH_SMAP);
2136 if(!(pr = map_clone_ph_block(vmd, vrd, pr, &iter)))
2137 return ENOMEM;
2139 physr_incr_iter(&iter);
2140 off += VM_PAGE_SIZE;
2143 return OK;
/*===========================================================================*
 *				rm_phys_regions				     *
 *===========================================================================*/
static void rm_phys_regions(struct vir_region *region,
	vir_bytes begin, vir_bytes length)
{
	/* Remove all phys regions between @begin and @begin+length.
	 *
	 * Don't update the page table, because we will update it at
	 * map_memory() later.
	 */
	struct phys_region *pr;
	physr_iter iter;

	physr_start_iter(region->phys, &iter, begin, AVL_GREATER_EQUAL);
	while((pr = physr_get_iter(&iter)) && pr->offset < begin + length) {
		pb_unreferenced(region, pr, 1);
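		/* pb_unreferenced() also removes pr from the region's AVL
		 * tree, which invalidates the iterator, so restart the walk
		 * from @begin before freeing pr itself.
		 */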
		physr_start_iter(region->phys, &iter, begin,
			AVL_GREATER_EQUAL);
		SLABFREE(pr);
	}
}
/*===========================================================================*
 *				map_memory				     *
 *===========================================================================*/
int map_memory(endpoint_t sour, endpoint_t dest,
	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
{
	/* This is the entry point: handle_memory() calls this function
	 * when VM receives a map-memory request.
	 */
	struct vmproc *vms, *vmd;
	struct vir_region *vrs, *vrd;
	vir_bytes offset_s, offset_d;
	int p;
	int r;

	if(vm_isokendpt(sour, &p) != OK)
		panic("map_memory: bad endpoint: %d", sour);
	vms = &vmproc[p];
	if(vm_isokendpt(dest, &p) != OK)
		panic("map_memory: bad endpoint: %d", dest);
	vmd = &vmproc[p];

	vrs = map_lookup(vms, virt_s, NULL);
	assert(vrs);
	vrd = map_lookup(vmd, virt_d, NULL);
	assert(vrd);

	/* Linear address -> offset from start of vir region. */
	offset_s = virt_s - vrs->vaddr;
	offset_d = virt_d - vrd->vaddr;

	/* Make sure that the range in the source process has been mapped
	 * to physical memory.
	 */
	map_handle_memory(vms, vrs, offset_s, length, 0);

	/* Prepare the destination: remove any phys regions currently
	 * mapped there.
	 */
	rm_phys_regions(vrd, offset_d, length);

	/* Map memory. */
	r = do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, flag);

	return r;
}
/*===========================================================================*
 *			get_clean_phys_region			     *
 *===========================================================================*/
static struct phys_region *
get_clean_phys_region(struct vmproc *vmp, vir_bytes vaddr, struct vir_region **ret_region)
{
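	/* Look up the phys region backing @vaddr in @vmp and make sure the
	 * underlying phys block is mapped exactly once, cloning it if it is
	 * shared, so that the caller may safely replace its physical
	 * address. Returns NULL on failure.
	 */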
	struct vir_region *region;
	vir_bytes mapaddr;
	struct phys_region *ph;

	mapaddr = vaddr;

	if(!(region = map_lookup(vmp, mapaddr, &ph)) || !ph) {
		printf("VM: get_clean_phys_region: 0x%lx not found\n", vaddr);
		return NULL;
	}

	assert(mapaddr >= region->vaddr);
	assert(mapaddr < region->vaddr + region->length);

	/* If it's mapped more than once, make a copy. */
	assert(ph->ph->refcount > 0);
	if(ph->ph->refcount > 1) {
		if(!(ph = map_clone_ph_block(vmp, region,
			ph, NULL))) {
			printf("VM: get_clean_phys_region: ph copy failed\n");
			return NULL;
		}
	}

	assert(ph->ph->refcount == 1);

	*ret_region = region;

	return ph;
}
static int getblock(struct vmproc *vmp, u64_t id, vir_bytes vaddr, int pages)
{
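	/* Claim a previously yielded block: substitute its saved physical
	 * pages for the pages currently mapped at @vaddr, free the replaced
	 * memory, and discard the yielded-block record.
	 */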
	yielded_t *yb;
	struct phys_region *ph;
	struct vir_region *region;
	yielded_avl *avl;
	block_id_t blockid;
	phys_bytes phaddr;
	int p;

	/* Try to get the yielded block */
	blockid.owner = vmp->vm_endpoint;
	blockid.id = id;
	avl = get_yielded_avl(blockid);
	if(!(yb = yielded_search(avl, blockid, AVL_EQUAL))) {
		return ESRCH;
	}

	if(yb->pages != pages) {
		printf("VM: getblock: length mismatch (%d != %d)\n",
			pages, yb->pages);
		return EFAULT;
	}

	phaddr = yb->physaddr;

	for(p = 0; p < pages; p++) {
		/* Get the intended phys region, make sure refcount is 1. */
		if(!(ph = get_clean_phys_region(vmp, vaddr, &region))) {
			printf("VM: getblock: not found for %d\n", vmp->vm_endpoint);
			return EINVAL;
		}

		assert(ph->ph->refcount == 1);

		/* Free the block that is currently there. */
		free_mem(ABS2CLICK(ph->ph->phys), 1);

		/* Set the phys block to new addr and update pagetable. */
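		/* USE() wraps updates to slab-allocated structures so the
		 * slab sanity checker can track writes; with sanity checking
		 * disabled it reduces to the plain assignments.
		 */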
		USE(ph->ph, ph->ph->phys = phaddr;);
		if(map_ph_writept(vmp, region, ph) != OK) {
			/* Presumably it was mapped, so there is no reason
			 * updating should fail.
			 */
			panic("do_get_block: couldn't write pt");
		}
		vaddr += VM_PAGE_SIZE;
		phaddr += VM_PAGE_SIZE;
	}

	/* Forget about the yielded block and free the struct. */
	freeyieldednode(yb, 0);

	return OK;
}
static int yieldblock(struct vmproc *vmp, u64_t id,
	vir_bytes vaddr, yielded_t **retyb, int pages)
{
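	/* Yield @pages physically contiguous pages starting at @vaddr:
	 * record their frames as a yielded block keyed by (owner, id), hand
	 * the process fresh zeroed memory in their place, and append the
	 * block to the LRU list as the youngest entry. On success the new
	 * block is optionally returned through @retyb.
	 */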
	yielded_t *newyb;
	vir_bytes mem_clicks, v, p, new_phaddr;
	struct vir_region *region;
	struct phys_region *ph = NULL, *prev_ph = NULL, *first_ph = NULL;
	yielded_avl *avl;
	block_id_t blockid;

	/* Makes no sense if yielded block ID already exists, and
	 * is likely a serious bug in the caller.
	 */
	blockid.id = id;
	blockid.owner = vmp->vm_endpoint;
	avl = get_yielded_avl(blockid);
	if(yielded_search(avl, blockid, AVL_EQUAL)) {
		printf("!");
		return EINVAL;
	}

	if((vaddr % VM_PAGE_SIZE) || pages < 1) return EFAULT;

	v = vaddr;
	for(p = 0; p < pages; p++) {
		if(!(region = map_lookup(vmp, v, &ph)) || !ph) {
			printf("VM: do_yield_block: not found for %d\n",
				vmp->vm_endpoint);
			return EINVAL;
		}
		if(!(region->flags & VR_ANON)) {
			printf("VM: yieldblock: non-anon 0x%lx\n", v);
			return EFAULT;
		}
		if(ph->ph->refcount != 1) {
			printf("VM: do_yield_block: mapped not once for %d\n",
				vmp->vm_endpoint);
			return EFAULT;
		}
		if(prev_ph) {
			if(ph->ph->phys != prev_ph->ph->phys + VM_PAGE_SIZE) {
				printf("VM: physically discontiguous yield\n");
				return EINVAL;
			}
		}
		prev_ph = ph;
		if(!first_ph) first_ph = ph;
		v += VM_PAGE_SIZE;
	}

	/* Make a new block to record the yielding in. */
	if(!SLABALLOC(newyb)) {
		return ENOMEM;
	}

	assert(!(ph->ph->phys % VM_PAGE_SIZE));

	if((mem_clicks = alloc_mem(pages, PAF_CLEAR)) == NO_MEM) {
		SLABFREE(newyb);
		return ENOMEM;
	}

	/* Update yielded block info. */
	USE(newyb,
		newyb->id = blockid;
		newyb->physaddr = first_ph->ph->phys;
		newyb->pages = pages;
		newyb->younger = NULL;);

	new_phaddr = CLICK2ABS(mem_clicks);

	/* Set new phys block to new addr and update pagetable. */
	v = vaddr;
	for(p = 0; p < pages; p++) {
		region = map_lookup(vmp, v, &ph);
		assert(region && ph);
		assert(ph->ph->refcount == 1);
		USE(ph->ph,
			ph->ph->phys = new_phaddr;);
		if(map_ph_writept(vmp, region, ph) != OK) {
			/* Presumably it was mapped, so there is no reason
			 * updating should fail.
			 */
			panic("yield_block: couldn't write pt");
		}
		v += VM_PAGE_SIZE;
		new_phaddr += VM_PAGE_SIZE;
	}

	/* Remember yielded block. */
	yielded_insert(avl, newyb);
	vmp->vm_yielded++;

	/* Add to LRU list too. It's the youngest block. */
	LRUCHECK;
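	/* The LRU list is doubly linked through the younger/older fields,
	 * with lru_youngest and lru_oldest at its ends; reclamation under
	 * memory pressure presumably starts at lru_oldest.
	 */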
	if(lru_youngest) {
		USE(lru_youngest,
			lru_youngest->younger = newyb;);
	} else {
		lru_oldest = newyb;
	}

	USE(newyb,
		newyb->older = lru_youngest;);

	lru_youngest = newyb;

	LRUCHECK;

	if(retyb)
		*retyb = newyb;

	return OK;
}
/*===========================================================================*
 *				do_forgetblocks				     *
 *===========================================================================*/
int do_forgetblocks(message *m)
{
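	/* Handle a request from the caller to forget all blocks it has
	 * yielded, releasing their memory.
	 */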
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_forgetblocks: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	free_yielded_proc(vmp);

	return OK;
}
/*===========================================================================*
 *				do_forgetblock				     *
 *===========================================================================*/
int do_forgetblock(message *m)
{
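	/* Handle a request from the caller to forget the single yielded
	 * block with the given id, freeing its memory if the block exists;
	 * forgetting an unknown id is silently ignored.
	 */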
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;
	yielded_t *yb;
	u64_t id;
	block_id_t blockid;
	yielded_avl *avl;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_forgetblock: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	id = make64(m->VMFB_IDLO, m->VMFB_IDHI);

	blockid.id = id;
	blockid.owner = vmp->vm_endpoint;
	avl = get_yielded_avl(blockid);
	if((yb = yielded_search(avl, blockid, AVL_EQUAL))) {
		freeyieldednode(yb, 1);
	}

	return OK;
}
/*===========================================================================*
 *			do_yieldblockgetblock			     *
 *===========================================================================*/
int do_yieldblockgetblock(message *m)
{
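	/* Combined yield-and-get request: the caller may yield one block
	 * and/or get another in a single call. Note that the reply value
	 * only reflects the get part; yieldblock()'s return value is
	 * discarded, and a yield-only call replies ESRCH.
	 */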
	u64_t yieldid, getid;
	int n;
	endpoint_t caller = m->m_source;
	struct vmproc *vmp;
	yielded_t *yb = NULL;
	int r = ESRCH;
	int pages;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yieldblockgetblock: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	pages = m->VMYBGB_LEN / VM_PAGE_SIZE;

	if((m->VMYBGB_LEN % VM_PAGE_SIZE) || pages < 1) {
		static int printed;
		if(!printed) {
			printed = 1;
			printf("vm: non-page-aligned or short block length\n");
		}
		return EFAULT;
	}

	yieldid = make64(m->VMYBGB_YIELDIDLO, m->VMYBGB_YIELDIDHI);
	getid = make64(m->VMYBGB_GETIDLO, m->VMYBGB_GETIDHI);

	if(cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to yield. */
		yieldblock(vmp, yieldid, (vir_bytes) m->VMYBGB_VADDR, &yb,
			pages);
	}

	if(cmp64(getid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to get. */
		r = getblock(vmp, getid, (vir_bytes) m->VMYBGB_VADDR, pages);
	}

	return r;
}
void map_setparent(struct vmproc *vmp)
{
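	/* Walk the whole region tree of @vmp and make each region point
	 * back at @vmp as its parent; presumably needed after the region
	 * tree has been copied into a new process.
	 */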
	region_iter iter;
	struct vir_region *vr;
	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		USE(vr, vr->parent = vmp;);
		region_incr_iter(&iter);
	}
}