/* minix.git / servers/vm/region.c
 * blob 58aca9d37302643c8d380284e250591c92d2d072 ("VM: abstract datastructures a bit")
 */
#define _SYSTEM 1

#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>
#include <minix/hash.h>

#include <sys/mman.h>

#include <limits.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <stdint.h>
#include <memory.h>
#include <sys/param.h>

#include "vm.h"
#include "proto.h"
#include "util.h"
#include "glo.h"
#include "region.h"
#include "sanitycheck.h"
#include "physravl.h"
#include "memlist.h"
/* LRU list. */
static yielded_t *lru_youngest = NULL, *lru_oldest = NULL;

/* Should a physblock be mapped writable? */
#define WRITABLE(r, pb) \
	(((r)->flags & VR_WRITABLE) && \
		(((r)->flags & (VR_DIRECT | VR_SHARED)) || \
		 (pb)->refcount == 1))
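/* The refcount == 1 clause is what makes copy-on-write work: a block that
 * is referenced from more than one place and is neither DIRECT nor SHARED
 * is mapped read-only, so the first write faults into map_pf() and the
 * block is cloned before it is made writable.
 */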
static int map_new_physblock(struct vmproc *vmp, struct vir_region
	*region, vir_bytes offset, vir_bytes length, phys_bytes what, u32_t
	allocflags, int written);

static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr);

static phys_bytes freeyieldednode(yielded_t *node, int freemem);

static struct vir_region *map_copy_region(struct vmproc *vmp, struct
	vir_region *vr);

static struct phys_region *map_clone_ph_block(struct vmproc *vmp,
	struct vir_region *region, struct phys_region *ph, physr_iter *iter);

#if SANITYCHECKS
static void lrucheck(void);
#endif

/* hash table of yielded blocks */
#define YIELD_HASHSIZE 65536
static yielded_avl vm_yielded_blocks[YIELD_HASHSIZE];
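/* Yielded blocks are pages a process has handed back to VM for safekeeping.
 * They are kept in this hash table of AVL trees, keyed by (owner, id), and
 * chained into the LRU list above so that free_yielded() can reclaim the
 * oldest ones first under memory pressure; getblock() below hands a block
 * back to its owner.
 */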
static int avl_inited = 0;

void map_region_init(void)
{
	int h;
	assert(!avl_inited);
	for(h = 0; h < YIELD_HASHSIZE; h++)
		yielded_init(&vm_yielded_blocks[h]);
	avl_inited = 1;
}
static yielded_avl *get_yielded_avl(block_id_t id)
{
	u32_t h;

	assert(avl_inited);

	hash_i_64(id.owner, id.id, h);
	h = h % YIELD_HASHSIZE;

	assert(h >= 0);
	assert(h < YIELD_HASHSIZE);

	return &vm_yielded_blocks[h];
}
static char *map_name(struct vir_region *vr)
{
	static char name[100];
	char *typename, *tag;
	int type = vr->flags & (VR_ANON|VR_DIRECT);
	switch(type) {
		case VR_ANON:
			typename = "anonymous";
			break;
		case VR_DIRECT:
			typename = "direct";
			break;
		default:
			panic("unknown mapping type: %d", type);
	}

	switch(vr->tag) {
		case VRT_TEXT:
			tag = "text";
			break;
		case VRT_STACK:
			tag = "stack";
			break;
		case VRT_HEAP:
			tag = "heap";
			break;
		case VRT_NONE:
			tag = "untagged";
			break;
		default:
			tag = "unknown tag value";
			break;
	}

	sprintf(name, "%s, %s", typename, tag);

	return name;
}
void map_printregion(struct vmproc *vmp, struct vir_region *vr)
{
	physr_iter iter;
	struct phys_region *ph;
	printf("map_printmap: map_name: %s\n", map_name(vr));
	printf("\t%lx (len 0x%lx, %lukB), %p\n",
		vr->vaddr, vr->length, vr->length/1024, map_name(vr));
	printf("\t\tphysblocks:\n");
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		printf("\t\t@ %lx (refs %d): phys 0x%lx\n",
			(vr->vaddr + ph->offset),
			ph->ph->refcount, ph->ph->phys);
		physr_incr_iter(&iter);
	}
}
/*===========================================================================*
 *				map_printmap				     *
 *===========================================================================*/
void map_printmap(vmp)
struct vmproc *vmp;
{
	struct vir_region *vr;
	region_iter iter;

	printf("memory regions in process %d:\n", vmp->vm_endpoint);

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		map_printregion(vmp, vr);
		region_incr_iter(&iter);
	}
}
static struct vir_region *getnextvr(struct vir_region *vr)
{
	struct vir_region *nextvr;
	region_iter v_iter;
	SLABSANE(vr);
	region_start_iter(&vr->parent->vm_regions_avl, &v_iter, vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter));
	assert(region_get_iter(&v_iter) == vr);
	region_incr_iter(&v_iter);
	nextvr = region_get_iter(&v_iter);
	if(!nextvr) return NULL;
	SLABSANE(nextvr);
	assert(vr->parent == nextvr->parent);
	assert(vr->vaddr < nextvr->vaddr);
	assert(vr->vaddr + vr->length <= nextvr->vaddr);
	return nextvr;
}
#if SANITYCHECKS

/*===========================================================================*
 *				map_sanitycheck_pt			     *
 *===========================================================================*/
static int map_sanitycheck_pt(struct vmproc *vmp,
	struct vir_region *vr, struct phys_region *pr)
{
	struct phys_block *pb = pr->ph;
	int rw;
	int r;

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	r = pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
		pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw, WMF_VERIFY);

	if(r != OK) {
		printf("proc %d phys_region 0x%lx sanity check failed\n",
			vmp->vm_endpoint, pr->offset);
		map_printregion(vmp, vr);
	}

	return r;
}
/*===========================================================================*
 *				map_sanitycheck				     *
 *===========================================================================*/
void map_sanitycheck(char *file, int line)
{
	struct vmproc *vmp;

	lrucheck();

	/* Macro for looping over all physical blocks of all regions of
	 * all processes.
	 */
#define ALLREGIONS(regioncode, physcode)				\
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) {		\
		region_iter v_iter;					\
		struct vir_region *vr;					\
		if(!(vmp->vm_flags & VMF_INUSE))			\
			continue;					\
		region_start_iter_least(&vmp->vm_regions_avl, &v_iter);	\
		while((vr = region_get_iter(&v_iter))) {		\
			physr_iter iter;				\
			struct phys_region *pr;				\
			regioncode;					\
			physr_start_iter_least(vr->phys, &iter);	\
			while((pr = physr_get_iter(&iter))) {		\
				physcode;				\
				physr_incr_iter(&iter);			\
			}						\
			region_incr_iter(&v_iter);			\
		}							\
	}

#define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
	/* Basic pointers check. */
	ALLREGIONS(MYSLABSANE(vr),MYSLABSANE(pr); MYSLABSANE(pr->ph);MYSLABSANE(pr->parent));
	ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr->parent == vr););

	/* Do counting for consistency check. */
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
		if(pr->ph->seencount == 1) {
			if(!(pr->parent->flags & VR_DIRECT)) {
				MYASSERT(usedpages_add(pr->ph->phys,
					VM_PAGE_SIZE) == OK);
			}
		}
	);

	/* Do consistency check. */
	ALLREGIONS({ struct vir_region *nextvr = getnextvr(vr);
		if(nextvr) {
			MYASSERT(vr->vaddr < nextvr->vaddr);
			MYASSERT(vr->vaddr + vr->length <= nextvr->vaddr);
		}
		}
		MYASSERT(!(vr->vaddr % VM_PAGE_SIZE));,
		if(pr->ph->refcount != pr->ph->seencount) {
			map_printmap(vmp);
			printf("ph in vr %p: 0x%lx refcount %u "
				"but seencount %u\n",
				vr, pr->offset,
				pr->ph->refcount, pr->ph->seencount);
		}
		{
			int n_others = 0;
			struct phys_region *others;
			if(pr->ph->refcount > 0) {
				MYASSERT(pr->ph->firstregion);
				if(pr->ph->refcount == 1) {
					MYASSERT(pr->ph->firstregion == pr);
				}
			} else {
				MYASSERT(!pr->ph->firstregion);
			}
			for(others = pr->ph->firstregion; others;
				others = others->next_ph_list) {
				MYSLABSANE(others);
				MYASSERT(others->ph == pr->ph);
				n_others++;
			}
			MYASSERT(pr->ph->refcount == n_others);
		}
		MYASSERT(pr->ph->refcount == pr->ph->seencount);
		MYASSERT(!(pr->offset % VM_PAGE_SIZE)););
	ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp, vr, pr) == OK));
}
#define LRUCHECK lrucheck()

static void lrucheck(void)
{
	yielded_t *list;

	/* list is empty and ok if both ends point to null. */
	if(!lru_youngest && !lru_oldest)
		return;

	/* if not, both should point to something. */
	SLABSANE(lru_youngest);
	SLABSANE(lru_oldest);

	assert(!lru_youngest->younger);
	assert(!lru_oldest->older);

	for(list = lru_youngest; list; list = list->older) {
		SLABSANE(list);
		if(list->younger) {
			SLABSANE(list->younger);
			assert(list->younger->older == list);
		} else	assert(list == lru_youngest);
		if(list->older) {
			SLABSANE(list->older);
			assert(list->older->younger == list);
		} else	assert(list == lru_oldest);
	}
}
void blockstats(void)
{
	yielded_t *list;
	int blocks = 0;
	phys_bytes mem = 0;
	clock_t ticks;
	int s;

	s = getuptime(&ticks);

	assert(s == OK);

	LRUCHECK;

	for(list = lru_youngest; list; list = list->older) {
		mem += VM_PAGE_SIZE;
		blocks++;
	}

	if(blocks > 0)
		printf("%d blocks, %lukB; ", blocks, mem/1024);

	printmemstats();
}

#else
#define LRUCHECK
#endif
/*=========================================================================*
 *				map_ph_writept				   *
 *=========================================================================*/
static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr)
{
	int rw;
	struct phys_block *pb = pr->ph;

	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(pr->offset % VM_PAGE_SIZE));
	assert(pb->refcount > 0);

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	if(pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
			pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw,
#if SANITYCHECKS
			!pr->written ? 0 :
#endif
			WMF_OVERWRITE) != OK) {
		printf("VM: map_writept: pt_writemap failed\n");
		return ENOMEM;
	}

#if SANITYCHECKS
	USE(pr, pr->written = 1;);
#endif

	return OK;
}
#define SLOT_FAIL ((vir_bytes) -1)
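/* 0 is a valid virtual address, so region_find_slot*() cannot use it as an
 * error value; a slot search that fails returns SLOT_FAIL instead.
 */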
/*===========================================================================*
 *				region_find_slot_range			     *
 *===========================================================================*/
static vir_bytes region_find_slot_range(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	struct vir_region *lastregion;
	vir_bytes startv = 0;
	int foundflag = 0;
	region_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Length must be reasonable. */
	assert(length > 0);

	/* Special case: allow caller to set maxv to 0 meaning 'I want
	 * it to be mapped in right here.'
	 */
	if(maxv == 0) {
		maxv = minv + length;

		/* Sanity check. */
		if(maxv <= minv) {
			printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
				minv, length);
			return SLOT_FAIL;
		}
	}

	/* Basic input sanity checks. */
	assert(!(length % VM_PAGE_SIZE));
	if(minv >= maxv) {
		printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
			minv, maxv, length);
	}
	assert(minv < maxv);
	assert(minv + length <= maxv);

#define FREEVRANGE_TRY(rangestart, rangeend) {			\
	vir_bytes frstart = (rangestart), frend = (rangeend);	\
	frstart = MAX(frstart, minv);				\
	frend = MIN(frend, maxv);				\
	if(frend > frstart && (frend - frstart) >= length) {	\
		startv = frend-length;				\
		foundflag = 1;					\
	} }

#define FREEVRANGE(start, end) {					\
	assert(!foundflag);						\
	FREEVRANGE_TRY(((start)+VM_PAGE_SIZE), ((end)-VM_PAGE_SIZE));	\
	if(!foundflag) {						\
		FREEVRANGE_TRY((start), (end));				\
	} }
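/* FREEVRANGE first tries the candidate range shrunk by one page on either
 * side, leaving unmapped guard pages around each region; only if that
 * fails does it retry with the exact range.
 */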
	/* find region after maxv. */
	region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_GREATER_EQUAL);
	lastregion = region_get_iter(&iter);

	if(!lastregion) {
		/* This is the free virtual address space after the last region. */
		region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_LESS);
		lastregion = region_get_iter(&iter);
		FREEVRANGE(lastregion ?
			lastregion->vaddr+lastregion->length : 0, VM_DATATOP);
	}

	if(!foundflag) {
		struct vir_region *vr;
		while((vr = region_get_iter(&iter)) && !foundflag) {
			struct vir_region *nextvr;
			region_decr_iter(&iter);
			nextvr = region_get_iter(&iter);
			FREEVRANGE(nextvr ? nextvr->vaddr+nextvr->length : 0,
				vr->vaddr);
		}
	}

	if(!foundflag) {
		printf("VM: region_find_slot: no 0x%lx bytes found for %d between 0x%lx and 0x%lx\n",
			length, vmp->vm_endpoint, minv, maxv);
		util_stacktrace();
		return SLOT_FAIL;
	}

	/* However we got it, startv must be in the requested range. */
	assert(startv >= minv);
	assert(startv < maxv);
	assert(startv + length <= maxv);

	/* remember this position as a hint for next time. */
	vmp->vm_region_top = startv + length;

	return startv;
}
/*===========================================================================*
 *				region_find_slot			     *
 *===========================================================================*/
static vir_bytes region_find_slot(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	vir_bytes v, hint = vmp->vm_region_top;

	/* use the top of the last inserted region as a minv hint if
	 * possible. remember that a zero maxv is a special case.
	 */

	if(maxv && hint < maxv && hint >= minv) {
		v = region_find_slot_range(vmp, minv, hint, length);

		if(v != SLOT_FAIL)
			return v;
	}

	return region_find_slot_range(vmp, minv, maxv, length);
}
struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv, vir_bytes length, int flags)
{
	physr_avl *phavl;
	struct vir_region *newregion;

	if(!(SLABALLOC(newregion))) {
		printf("vm: region_new: could not allocate\n");
		return NULL;
	}

	/* Fill in node details. */
	USE(newregion,
	newregion->vaddr = startv;
	newregion->length = length;
	newregion->flags = flags;
	newregion->tag = VRT_NONE;
	newregion->lower = newregion->higher = NULL;
	newregion->parent = vmp;);

	SLABALLOC(phavl);
	if(!phavl) {
		printf("VM: region_new: allocating phys avl failed\n");
		SLABFREE(newregion);
		return NULL;
	}
	USE(newregion, newregion->phys = phavl;);
	physr_init(newregion->phys);

	return newregion;
}
/*===========================================================================*
 *				map_page_region				     *
 *===========================================================================*/
struct vir_region *map_page_region(vmp, minv, maxv, length,
	what, flags, mapflags)
struct vmproc *vmp;
vir_bytes minv;
vir_bytes maxv;
vir_bytes length;
vir_bytes what;
u32_t flags;
int mapflags;
{
	struct vir_region *newregion;
	vir_bytes startv;

	assert(!(length % VM_PAGE_SIZE));

	SANITYCHECK(SCL_FUNCTIONS);

	if((flags & VR_CONTIG) && !(mapflags & MF_PREALLOC)) {
		printf("map_page_region: can't make contiguous allocation without preallocating\n");
		return NULL;
	}

	startv = region_find_slot(vmp, minv, maxv, length);
	if (startv == SLOT_FAIL)
		return NULL;

	/* Now we want a new region. */
	if(!(newregion = region_new(vmp, startv, length, flags))) {
		printf("VM: map_page_region: allocating region failed\n");
		return NULL;
	}

	/* If we know what we're going to map to, map it right away. */
	if(what != MAP_NONE) {
		assert(!(what % VM_PAGE_SIZE));
		assert(!(startv % VM_PAGE_SIZE));
		assert(!(mapflags & MF_PREALLOC));
		if(map_new_physblock(vmp, newregion, 0, length,
			what, PAF_CLEAR, 0) != OK) {
			printf("VM: map_new_physblock failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	if((flags & VR_ANON) && (mapflags & MF_PREALLOC)) {
		if(map_handle_memory(vmp, newregion, 0, length, 1) != OK) {
			printf("VM: map_page_region: prealloc failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	/* Pre-allocations should be uninitialized, but after that it's a
	 * different story.
	 */
	USE(newregion, newregion->flags &= ~VR_UNINITIALIZED;);

	/* Link it. */
	region_insert(&vmp->vm_regions_avl, newregion);

#if SANITYCHECKS
	assert(startv == newregion->vaddr);
	{
		struct vir_region *nextvr;
		if((nextvr = getnextvr(newregion))) {
			assert(newregion->vaddr < nextvr->vaddr);
		}
	}
#endif

	SANITYCHECK(SCL_FUNCTIONS);

	return newregion;
}
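/* Illustrative sketch only (not a call site in this file): a one-page
 * anonymous, writable, preallocated mapping anywhere in the data range
 * would be requested roughly as
 *
 *	map_page_region(vmp, 0, VM_DATATOP, VM_PAGE_SIZE, MAP_NONE,
 *		VR_ANON | VR_WRITABLE, MF_PREALLOC);
 *
 * which finds a slot, creates the region, and faults its page in up front
 * via map_handle_memory().
 */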
static struct phys_region *reset_physr_iter(struct vir_region *region,
	physr_iter *iter, vir_bytes offset)
{
	struct phys_region *ph;

	physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
	ph = physr_get_iter(iter);
	assert(ph);
	assert(ph->offset == offset);

	return ph;
}
/*===========================================================================*
 *				map_subfree				     *
 *===========================================================================*/
static int map_subfree(struct vir_region *region, vir_bytes len)
{
	struct phys_region *pr;
	physr_iter iter;

#if SANITYCHECKS
	SLABSANE(region);
	SLABSANE(region->phys);
	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		struct phys_region *others;
		struct phys_block *pb;

		pb = pr->ph;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
		}
		physr_incr_iter(&iter);
	}
#endif
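	/* Walk the phys_regions from the lowest offset; every page that
	 * lies entirely below 'len' is unreferenced and freed. Removal
	 * invalidates the iterator, so it is restarted after each free.
	 */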
	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		physr_incr_iter(&iter);
		if(pr->offset >= len)
			break;
		if(pr->offset + VM_PAGE_SIZE <= len) {
			pb_unreferenced(region, pr);
			physr_start_iter_least(region->phys, &iter);
			SLABFREE(pr);
		}
	}

	return OK;
}
/*===========================================================================*
 *				map_free				     *
 *===========================================================================*/
static int map_free(struct vir_region *region)
{
	int r;

	if((r=map_subfree(region, region->length)) != OK) {
		printf("%d\n", __LINE__);
		return r;
	}

	USE(region,
		SLABFREE(region->phys););
	SLABFREE(region);

	return OK;
}
/*===========================================================================*
 *				yielded_block_cmp			     *
 *===========================================================================*/
int yielded_block_cmp(struct block_id *id1, struct block_id *id2)
{
	if(id1->owner < id2->owner)
		return -1;
	if(id1->owner > id2->owner)
		return 1;
	return cmp64(id1->id, id2->id);
}
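/* This comparator defines the total order of the yielded-block AVL trees:
 * blocks sort first by owning endpoint, then by 64-bit block id.
 */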
/*===========================================================================*
 *				free_yielded_proc			     *
 *===========================================================================*/
static vir_bytes free_yielded_proc(struct vmproc *vmp)
{
	vir_bytes total = 0;
	int h;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Free associated regions. */
	for(h = 0; h < YIELD_HASHSIZE && vmp->vm_yielded > 0; h++) {
		yielded_t *yb;
		yielded_iter iter;
		yielded_avl *avl = &vm_yielded_blocks[h];
		yielded_start_iter_least(avl, &iter);
		while((yb = yielded_get_iter(&iter))) {
			yielded_t *next_yb;
			SLABSANE(yb);
			yielded_incr_iter(&iter);
			if(yb->id.owner != vmp->vm_endpoint)
				continue;
			next_yb = yielded_get_iter(&iter);
			total += freeyieldednode(yb, 1);
			/* the above removal invalidated our iter; restart it
			 * for the node we want to start at.
			 */
			if(!next_yb) break;
			yielded_start_iter(avl, &iter, next_yb->id, AVL_EQUAL);
			assert(yielded_get_iter(&iter) == next_yb);
		}
	}

	return total;
}
static phys_bytes freeyieldednode(yielded_t *node, int freemem)
{
	yielded_t *older, *younger, *removed;
	yielded_avl *avl;
	int p;

	SLABSANE(node);

	LRUCHECK;

	/* Update LRU. */

	younger = node->younger;
	older = node->older;

	if(younger) {
		SLABSANE(younger);
		assert(younger->older == node);
		USE(younger, younger->older = node->older;);
	} else {
		assert(node == lru_youngest);
		lru_youngest = node->older;
	}

	if(older) {
		SLABSANE(older);
		assert(older->younger == node);
		USE(older, older->younger = node->younger;);
	} else {
		assert(node == lru_oldest);
		lru_oldest = node->younger;
	}

	LRUCHECK;

	/* Update AVL. */

	if(vm_isokendpt(node->id.owner, &p) != OK)
		panic("out of date owner of yielded block %d", node->id.owner);
	avl = get_yielded_avl(node->id);
	removed = yielded_remove(avl, node->id);
	assert(removed == node);
	assert(vmproc[p].vm_yielded > 0);
	vmproc[p].vm_yielded--;

	/* Free associated memory if requested. */

	if(freemem) {
		free_mem(ABS2CLICK(node->addr), 1);
	}

	/* Free node. */
	SLABFREE(node);

	return VM_PAGE_SIZE;
}
/*========================================================================*
 *				free_yielded				  *
 *========================================================================*/
vir_bytes free_yielded(vir_bytes max_bytes)
{

/* PRIVATE yielded_t *lru_youngest = NULL, *lru_oldest = NULL; */
	vir_bytes freed = 0;
	int blocks = 0;

	while(freed < max_bytes && lru_oldest) {
		SLABSANE(lru_oldest);
		freed += freeyieldednode(lru_oldest, 1);
		blocks++;
	}

	return freed;
}
/*========================================================================*
 *				map_free_proc				  *
 *========================================================================*/
int map_free_proc(vmp)
struct vmproc *vmp;
{
	struct vir_region *r;

	while((r = region_search_root(&vmp->vm_regions_avl))) {
		SANITYCHECK(SCL_DETAIL);
#if SANITYCHECKS
		nocheck++;
#endif
		region_remove(&vmp->vm_regions_avl, r->vaddr); /* For sanity checks. */
		map_free(r);
#if SANITYCHECKS
		nocheck--;
#endif
		SANITYCHECK(SCL_DETAIL);
	}

	region_init(&vmp->vm_regions_avl);

	/* Free associated yielded blocks. */
	free_yielded_proc(vmp);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*===========================================================================*
 *				map_lookup				     *
 *===========================================================================*/
struct vir_region *map_lookup(vmp, offset)
struct vmproc *vmp;
vir_bytes offset;
{
	struct vir_region *r;

	SANITYCHECK(SCL_FUNCTIONS);

#if SANITYCHECKS
	if(!region_search_root(&vmp->vm_regions_avl))
		panic("process has no regions: %d", vmp->vm_endpoint);
#endif

	if((r = region_search(&vmp->vm_regions_avl, offset, AVL_LESS_EQUAL))) {
		if(offset >= r->vaddr && offset < r->vaddr + r->length)
			return r;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return NULL;
}
static u32_t vrallocflags(u32_t flags)
{
	u32_t allocflags = 0;

	if(flags & VR_PHYS64K)
		allocflags |= PAF_ALIGN64K;
	if(flags & VR_LOWER16MB)
		allocflags |= PAF_LOWER16MB;
	if(flags & VR_LOWER1MB)
		allocflags |= PAF_LOWER1MB;
	if(flags & VR_CONTIG)
		allocflags |= PAF_CONTIG;

	return allocflags;
}
/*===========================================================================*
 *				map_new_physblock			     *
 *===========================================================================*/
static int map_new_physblock(vmp, region, start_offset, length,
	what_mem, allocflags, written)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes start_offset;
vir_bytes length;
phys_bytes what_mem;
u32_t allocflags;
int written;
{
	struct memlist *memlist, *ml;
	int r;
	vir_bytes mapped = 0;
	vir_bytes offset = start_offset;

	SANITYCHECK(SCL_FUNCTIONS);

	assert(!(length % VM_PAGE_SIZE));

	if((region->flags & VR_CONTIG) &&
		(start_offset > 0 || length < region->length)) {
		printf("VM: region length 0x%lx, offset 0x%lx length 0x%lx\n",
			region->length, start_offset, length);
		map_printmap(vmp);
		printf("VM: map_new_physblock: non-full contig allocation requested\n");
		return EFAULT;
	}

	/* Memory for new physical block. */
	allocflags |= vrallocflags(region->flags);

	if(allocflags & PAF_CONTIG) {
		assert(what_mem == MAP_NONE);
		if((what_mem = alloc_mem(length/VM_PAGE_SIZE, allocflags)) == NO_MEM) {
			return ENOMEM;
		}
		what_mem = CLICK2ABS(what_mem);
		allocflags &= ~PAF_CONTIG;
		assert(what_mem != MAP_NONE);
	}

	if(!(memlist = alloc_mem_in_list(length, allocflags, what_mem))) {
		printf("map_new_physblock: couldn't allocate\n");
		return ENOMEM;
	}

	r = OK;

	for(ml = memlist; ml; ml = ml->next) {
		struct phys_region *newphysr = NULL;
		struct phys_block *newpb = NULL;

		/* Allocate things necessary for this chunk of memory. */
		if(!(newpb = pb_new(ml->phys)) ||
		   !(newphysr = pb_reference(newpb, offset, region))) {
			printf("map_new_physblock: no memory for the ph slabs\n");
			if(newphysr) SLABFREE(newphysr);
			if(newpb) SLABFREE(newpb);
			r = ENOMEM;
			break;
		}

		/* Update pagetable. */
		if(map_ph_writept(vmp, region, newphysr) != OK) {
			printf("map_new_physblock: map_ph_writept failed\n");
			r = ENOMEM;
			break;
		}

		offset += VM_PAGE_SIZE;
		mapped += VM_PAGE_SIZE;
	}

	if(r != OK) {
		offset = start_offset;
		/* Things did not go well. Undo everything. */
		for(ml = memlist; ml; ml = ml->next) {
			struct phys_region *physr;
			if((physr = physr_search(region->phys, offset,
				AVL_EQUAL))) {
				assert(physr->ph->refcount == 1);
				pb_unreferenced(region, physr);
				SLABFREE(physr);
			}
			offset += VM_PAGE_SIZE;
		}
	} else assert(mapped == length);

	/* Always clean up the memlist itself, even if everything
	 * worked we're not using the memlist nodes any more. And
	 * the memory they reference is either freed above or in use.
	 */
	free_mem_list(memlist, 0);

	SANITYCHECK(SCL_FUNCTIONS);

	return r;
}
/*===========================================================================*
 *				map_clone_ph_block			     *
 *===========================================================================*/
static struct phys_region *map_clone_ph_block(vmp, region, ph, iter)
struct vmproc *vmp;
struct vir_region *region;
struct phys_region *ph;
physr_iter *iter;
{
	vir_bytes offset;
	u32_t allocflags;
	phys_bytes physaddr;
	struct phys_region *newpr;
	int region_has_single_block;
	int written = 0;
#if SANITYCHECKS
	written = ph->written;
#endif
	SANITYCHECK(SCL_FUNCTIONS);

	/* Warning: this function will free the passed
	 * phys_region *ph and replace it (in the same offset)
	 * with another! So both the pointer to it
	 * and any iterators over the phys_regions in the vir_region
	 * will be invalid on successful return. (Iterators over
	 * the vir_region could be invalid on unsuccessful return too.)
	 */

	/* This is only to be done if there is more than one copy. */
	assert(ph->ph->refcount > 1);

	/* This function takes a physical block, copies its contents
	 * into newly allocated memory, and replaces the single physical
	 * block by one or more physical blocks with refcount 1 with the
	 * same contents as the original. In other words, a fragmentable
	 * version of map_copy_ph_block().
	 */

	/* Remember where and how much. */
	offset = ph->offset;
	physaddr = ph->ph->phys;

	/* Now unlink the original physical block so we can replace
	 * it with new ones.
	 */

	SLABSANE(ph);
	SLABSANE(ph->ph);
	assert(ph->ph->refcount > 1);
	pb_unreferenced(region, ph);
	assert(ph->ph->refcount >= 1);
	SLABFREE(ph);

	SANITYCHECK(SCL_DETAIL);

	/* Put new free memory in. */
	allocflags = vrallocflags(region->flags);
	region_has_single_block = (offset == 0 && region->length == VM_PAGE_SIZE);
	assert(region_has_single_block || !(allocflags & PAF_CONTIG));
	assert(!(allocflags & PAF_CLEAR));
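	/* A contiguous allocation only makes sense here if the region is a
	 * single page, and the new memory must not be cleared: the
	 * copy_abs2region() call below immediately overwrites it with the
	 * contents of the original block.
	 */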
	if(map_new_physblock(vmp, region, offset, VM_PAGE_SIZE,
		MAP_NONE, allocflags, written) != OK) {
		/* XXX original range now gone. */
		printf("VM: map_clone_ph_block: map_new_physblock failed.\n");
		return NULL;
	}

	/* Copy the block to the new memory.
	 * Can only fail if map_new_physblock didn't do what we asked.
	 */
	if(copy_abs2region(physaddr, region, offset, VM_PAGE_SIZE) != OK)
		panic("copy_abs2region failed, no good reason for that");

	newpr = physr_search(region->phys, offset, AVL_EQUAL);
	assert(newpr);
	assert(newpr->offset == offset);

	if(iter) {
		physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
		assert(physr_get_iter(iter) == newpr);
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return newpr;
}
/*===========================================================================*
 *				map_pf					     *
 *===========================================================================*/
int map_pf(vmp, region, offset, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset;
int write;
{
	vir_bytes virpage;
	struct phys_region *ph;
	int r = OK;

	assert(offset >= 0);
	assert(offset < region->length);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));

	virpage = offset - offset % VM_PAGE_SIZE;

	SANITYCHECK(SCL_FUNCTIONS);

	if((ph = physr_search(region->phys, offset, AVL_LESS_EQUAL)) &&
	   (ph->offset <= offset && offset < ph->offset + VM_PAGE_SIZE)) {
		/* Pagefault in existing block. Do copy-on-write. */
		assert(write);
		assert(region->flags & VR_WRITABLE);
		assert(ph->ph->refcount > 0);
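		/* The assert(write) above encodes the assumption that a
		 * fault on a page that is present can only be a write
		 * fault: present pages are mapped in at least read-only.
		 */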
		if(WRITABLE(region, ph->ph)) {
			r = map_ph_writept(vmp, region, ph);
			if(r != OK)
				printf("map_ph_writept failed\n");
		} else {
			if(ph->ph->refcount > 0
				&& ph->ph->share_flag != PBSH_COW) {
				printf("VM: write RO mapped pages.\n");
				return EFAULT;
			} else {
				if(!map_clone_ph_block(vmp, region, ph, NULL))
					r = ENOMEM;
			}
		}
	} else {
		/* Pagefault in non-existing block. Map in new block. */
		if(map_new_physblock(vmp, region, virpage,
			VM_PAGE_SIZE, MAP_NONE, PAF_CLEAR, 0) != OK) {
			printf("map_new_physblock failed\n");
			r = ENOMEM;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(r != OK) {
		printf("VM: map_pf: failed (%d)\n", r);
		return r;
	}

#if SANITYCHECKS
	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+virpage,
		VM_PAGE_SIZE, write)) {
		panic("map_pf: pt_checkrange failed: %d", r);
	}
#endif

	return r;
}
/*===========================================================================*
 *				map_pin_memory				     *
 *===========================================================================*/
int map_pin_memory(struct vmproc *vmp)
{
	struct vir_region *vr;
	int r;
	region_iter iter;
	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	/* Scan all memory regions. */
	while((vr = region_get_iter(&iter))) {
		/* Make sure region is mapped to physical memory and writable.*/
		r = map_handle_memory(vmp, vr, 0, vr->length, 1);
		if(r != OK) {
			panic("map_pin_memory: map_handle_memory failed: %d", r);
		}
		region_incr_iter(&iter);
	}
	return OK;
}
/*===========================================================================*
 *				map_handle_memory			     *
 *===========================================================================*/
int map_handle_memory(vmp, region, offset, length, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset, length;
int write;
{
	struct phys_region *physr, *nextphysr;
	int changes = 0;
	physr_iter iter;
	u32_t allocflags = 0;

	if(!(region->flags & VR_UNINITIALIZED)) {
		allocflags = PAF_CLEAR;
	}

#define FREE_RANGE_HERE(er1, er2) {					\
	struct phys_region *r1 = (er1), *r2 = (er2);			\
	vir_bytes start = offset, end = offset + length;		\
	if(r1) {							\
		start = MAX(start, r1->offset + VM_PAGE_SIZE); }	\
	if(r2) {							\
		end = MIN(end, r2->offset); }				\
	if(start < end) {						\
		SANITYCHECK(SCL_DETAIL);				\
		if(map_new_physblock(vmp, region, start,		\
			end-start, MAP_NONE, allocflags, 0) != OK) {	\
			SANITYCHECK(SCL_DETAIL);			\
			return ENOMEM;					\
		}							\
		changes++;						\
	} }
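/* FREE_RANGE_HERE allocates and maps fresh memory (zeroed unless the region
 * is VR_UNINITIALIZED) for the gap between two existing phys_regions,
 * clamped to the [offset, offset+length) window being handled.
 */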
	SANITYCHECK(SCL_FUNCTIONS);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));
	assert(!(offset % VM_PAGE_SIZE));
	assert(!(length % VM_PAGE_SIZE));
	assert(!write || (region->flags & VR_WRITABLE));

	physr_start_iter(region->phys, &iter, offset, AVL_LESS_EQUAL);
	physr = physr_get_iter(&iter);

	if(!physr) {
		physr_start_iter(region->phys, &iter, offset, AVL_GREATER_EQUAL);
		physr = physr_get_iter(&iter);
	}

	FREE_RANGE_HERE(NULL, physr);

	if(physr) {
		physr = reset_physr_iter(region, &iter, physr->offset);
		if(physr->offset + VM_PAGE_SIZE <= offset) {
			physr_incr_iter(&iter);
			physr = physr_get_iter(&iter);

			FREE_RANGE_HERE(NULL, physr);
			if(physr) {
				physr = reset_physr_iter(region, &iter,
					physr->offset);
			}
		}
	}

	while(physr) {
		int r;

		SANITYCHECK(SCL_DETAIL);

		if(write) {
			assert(physr->ph->refcount > 0);
			if(!WRITABLE(region, physr->ph)) {
				if(!(physr = map_clone_ph_block(vmp, region,
					physr, &iter))) {
					printf("VM: map_handle_memory: no copy\n");
					return ENOMEM;
				}
				changes++;
			} else {
				SANITYCHECK(SCL_DETAIL);
				if((r=map_ph_writept(vmp, region, physr)) != OK) {
					printf("VM: map_ph_writept failed\n");
					return r;
				}
				changes++;
				SANITYCHECK(SCL_DETAIL);
			}
		}

		SANITYCHECK(SCL_DETAIL);
		physr_incr_iter(&iter);
		nextphysr = physr_get_iter(&iter);
		FREE_RANGE_HERE(physr, nextphysr);
		SANITYCHECK(SCL_DETAIL);
		if(nextphysr) {
			if(nextphysr->offset >= offset + length)
				break;
			nextphysr = reset_physr_iter(region, &iter,
				nextphysr->offset);
		}
		physr = nextphysr;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(changes < 1) {
#if VERBOSE
		printf("region start at 0x%lx offset 0x%lx len 0x%lx write %d\n",
			region->vaddr, offset, length, write);
		printf("no changes in map_handle_memory\n");
#endif
		return EFAULT;
	}

#if SANITYCHECKS
	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset, length, write)) {
		printf("handle mem 0x%lx-0x%lx failed\n",
			region->vaddr+offset,region->vaddr+offset+length);
		map_printregion(vmp, region);
		panic("checkrange failed");
	}
#endif

	return OK;
}
#if SANITYCHECKS
static int count_phys_regions(struct vir_region *vr)
{
	int n = 0;
	struct phys_region *ph;
	physr_iter iter;
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		n++;
		physr_incr_iter(&iter);
	}
	return n;
}
#endif
/*===========================================================================*
 *				map_copy_region				     *
 *===========================================================================*/
static struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region *vr)
{
	/* map_copy_region creates a complete copy of the vir_region
	 * data structure, linking in the same phys_blocks directly,
	 * but all in limbo, i.e., the caller has to link the vir_region
	 * to a process. Therefore it doesn't increase the refcount in
	 * the phys_block; the caller has to do this once it's linked.
	 * The reason for this is to keep the sanity checks working
	 * within this function.
	 */
	struct vir_region *newvr;
	struct phys_region *ph;
	physr_iter iter;
#if SANITYCHECKS
	int cr;
	cr = count_phys_regions(vr);
#endif

	if(!(newvr = region_new(vr->parent, vr->vaddr, vr->length, vr->flags)))
		return NULL;

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		struct phys_region *newph = pb_reference(ph->ph, ph->offset, newvr);

		if(!newph) { map_free(newvr); return NULL; }

#if SANITYCHECKS
		USE(newph, newph->written = 0;);
		assert(count_phys_regions(vr) == cr);
#endif
		physr_incr_iter(&iter);
	}

#if SANITYCHECKS
	assert(count_phys_regions(vr) == count_phys_regions(newvr));
#endif

	return newvr;
}
/*===========================================================================*
 *				copy_abs2region				     *
 *===========================================================================*/
int copy_abs2region(phys_bytes abs, struct vir_region *destregion,
	phys_bytes offset, phys_bytes len)
{
	assert(destregion);
	assert(destregion->phys);
	while(len > 0) {
		phys_bytes sublen, suboffset;
		struct phys_region *ph;
		assert(destregion);
		assert(destregion->phys);
		if(!(ph = physr_search(destregion->phys, offset, AVL_LESS_EQUAL))) {
			printf("VM: copy_abs2region: no phys region found (1).\n");
			return EFAULT;
		}
		assert(ph->offset <= offset);
		if(ph->offset+VM_PAGE_SIZE <= offset) {
			printf("VM: copy_abs2region: no phys region found (2).\n");
			return EFAULT;
		}
		suboffset = offset - ph->offset;
		assert(suboffset < VM_PAGE_SIZE);
		sublen = len;
		if(sublen > VM_PAGE_SIZE - suboffset)
			sublen = VM_PAGE_SIZE - suboffset;
		assert(suboffset + sublen <= VM_PAGE_SIZE);
		if(ph->ph->refcount != 1) {
			printf("VM: copy_abs2region: refcount not 1.\n");
			return EFAULT;
		}

		if(sys_abscopy(abs, ph->ph->phys + suboffset, sublen) != OK) {
			printf("VM: copy_abs2region: abscopy failed.\n");
			return EFAULT;
		}
		abs += sublen;
		offset += sublen;
		len -= sublen;
	}

	return OK;
}
/*=========================================================================*
 *				map_writept				   *
 *=========================================================================*/
int map_writept(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *ph;
	int r;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		physr_iter ph_iter;
		physr_start_iter_least(vr->phys, &ph_iter);

		while((ph = physr_get_iter(&ph_iter))) {
			physr_incr_iter(&ph_iter);

			/* If this phys block is shared as SMAP, then do
			 * not update the page table. */
			if(ph->ph->refcount > 1
				&& ph->ph->share_flag == PBSH_SMAP) {
				continue;
			}

			if((r=map_ph_writept(vmp, vr, ph)) != OK) {
				printf("VM: map_writept: failed\n");
				return r;
			}
		}
		region_incr_iter(&v_iter);
	}

	return OK;
}
/*========================================================================*
 *				map_proc_copy				  *
 *========================================================================*/
int map_proc_copy(dst, src)
struct vmproc *dst;
struct vmproc *src;
{
/* Copy all the memory regions from the src process to the dst process. */
	region_init(&dst->vm_regions_avl);

	return map_proc_copy_from(dst, src, NULL);
}
/*========================================================================*
 *				map_proc_copy_from			  *
 *========================================================================*/
int map_proc_copy_from(dst, src, start_src_vr)
struct vmproc *dst;
struct vmproc *src;
struct vir_region *start_src_vr;
{
	struct vir_region *vr;
	region_iter v_iter;

	if(!start_src_vr)
		start_src_vr = region_search_least(&src->vm_regions_avl);

	assert(start_src_vr);
	assert(start_src_vr->parent == src);
	region_start_iter(&src->vm_regions_avl, &v_iter,
		start_src_vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter) == start_src_vr);

	/* Copy source regions after the destination's last region (if any). */

	SANITYCHECK(SCL_FUNCTIONS);

	while((vr = region_get_iter(&v_iter))) {
		physr_iter iter_orig, iter_new;
		struct vir_region *newvr;
		struct phys_region *orig_ph, *new_ph;
		if(!(newvr = map_copy_region(dst, vr))) {
			map_free_proc(dst);
			return ENOMEM;
		}
		USE(newvr, newvr->parent = dst;);
		region_insert(&dst->vm_regions_avl, newvr);
		physr_start_iter_least(vr->phys, &iter_orig);
		physr_start_iter_least(newvr->phys, &iter_new);
		while((orig_ph = physr_get_iter(&iter_orig))) {
			struct phys_block *pb;
			new_ph = physr_get_iter(&iter_new);
			/* Check two physregions both are nonnull,
			 * are different, and match physblocks.
			 */
			assert(new_ph);
			assert(orig_ph);
			assert(orig_ph != new_ph);
			pb = orig_ph->ph;
			assert(orig_ph->ph == new_ph->ph);

			/* If the phys block has been shared as SMAP,
			 * do the regular copy. */
			if(pb->refcount > 2 && pb->share_flag == PBSH_SMAP) {
				map_clone_ph_block(dst, newvr,new_ph,
					&iter_new);
			} else {
				USE(pb, pb->share_flag = PBSH_COW;);
			}

			/* Get next new physregion */
			physr_incr_iter(&iter_orig);
			physr_incr_iter(&iter_new);
		}
		assert(!physr_get_iter(&iter_new));
		region_incr_iter(&v_iter);
	}

	map_writept(src);
	map_writept(dst);
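	/* Rewriting both page tables above is what arms COW: WRITABLE()
	 * is false for a non-shared block with refcount > 1, so the shared
	 * pages become read-only in source and destination alike, and the
	 * first write by either side faults into map_pf() for the copy.
	 */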
	SANITYCHECK(SCL_FUNCTIONS);
	return OK;
}
int map_region_extend_upto_v(struct vmproc *vmp, vir_bytes v)
{
	vir_bytes offset = v, end;
	struct vir_region *vr, *nextvr;
	int r = OK;

	if(!(vr = region_search(&vmp->vm_regions_avl, offset, AVL_LESS))) {
		printf("VM: nothing to extend\n");
		return ENOMEM;
	}

	if(!(vr->flags & VR_ANON)) {
		printf("VM: memory range to extend not anonymous\n");
		return ENOMEM;
	}

	assert(vr->vaddr <= offset);
	if((nextvr = getnextvr(vr))) {
		assert(offset <= nextvr->vaddr);
	}

	end = vr->vaddr + vr->length;

	offset = roundup(offset, VM_PAGE_SIZE);

	if(end < offset)
		r = map_region_extend(vmp, vr, offset - end);

	return r;
}
/*========================================================================*
 *				map_region_extend			  *
 *========================================================================*/
int map_region_extend(struct vmproc *vmp, struct vir_region *vr,
	vir_bytes delta)
{
	vir_bytes end;
	struct vir_region *nextvr;

	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));
	if(vr->flags & VR_CONTIG) {
		printf("VM: can't grow contig region\n");
		return EFAULT;
	}

	if(!delta) return OK;
	end = vr->vaddr + vr->length;
	assert(end >= vr->vaddr);

	if(end + delta <= end) {
		printf("VM: strange delta 0x%lx\n", delta);
		return ENOMEM;
	}

	nextvr = getnextvr(vr);

	if(!nextvr || end + delta <= nextvr->vaddr) {
		USE(vr, vr->length += delta;);
		return OK;
	}

	return ENOMEM;
}
/*========================================================================*
 *				map_region_shrink			  *
 *========================================================================*/
int map_region_shrink(struct vir_region *vr, vir_bytes delta)
{
	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));

#if 0
	printf("VM: ignoring region shrink\n");
#endif

	return OK;
}
struct vir_region *map_region_lookup_tag(vmp, tag)
struct vmproc *vmp;
u32_t tag;
{
	struct vir_region *vr;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		if(vr->tag == tag)
			return vr;
		region_incr_iter(&v_iter);
	}

	return NULL;
}

void map_region_set_tag(struct vir_region *vr, u32_t tag)
{
	USE(vr, vr->tag = tag;);
}

u32_t map_region_get_tag(struct vir_region *vr)
{
	return vr->tag;
}
/*========================================================================*
 *				map_unmap_region			  *
 *========================================================================*/
int map_unmap_region(struct vmproc *vmp, struct vir_region *r,
	vir_bytes len)
{
/* Shrink the region by 'len' bytes, from the start. Unreference
 * memory it used to reference if any.
 */
	vir_bytes regionstart;

	SANITYCHECK(SCL_FUNCTIONS);

	if(len > r->length || (len % VM_PAGE_SIZE)) {
		printf("VM: bogus length 0x%lx\n", len);
		return EINVAL;
	}

	if(!(r->flags & (VR_ANON|VR_DIRECT))) {
		printf("VM: only unmap anonymous or direct memory\n");
		return EINVAL;
	}

	regionstart = r->vaddr;

	if(len == r->length) {
		SANITYCHECK(SCL_DETAIL);
		/* Whole region disappears. Unlink and free it. */
		region_remove(&vmp->vm_regions_avl, r->vaddr);
		map_free(r);
	} else {
		struct phys_region *pr;
		physr_iter iter;
		/* Region shrinks. First unreference its memory
		 * and then shrink the region.
		 */
		SANITYCHECK(SCL_DETAIL);
		map_subfree(r, len);
		USE(r,
		r->vaddr += len;
		r->length -= len;);
		physr_start_iter_least(r->phys, &iter);

		/* vaddr has increased; to make all the phys_regions
		 * point to the same addresses, make them shrink by the
		 * same amount.
		 */
		while((pr = physr_get_iter(&iter))) {
			assert(pr->offset >= len);
			USE(pr, pr->offset -= len;);
			physr_incr_iter(&iter);
		}
	}

	SANITYCHECK(SCL_DETAIL);

	if(pt_writemap(vmp, &vmp->vm_pt, regionstart,
		MAP_NONE, len, 0, WMF_OVERWRITE) != OK) {
		printf("VM: map_unmap_region: pt_writemap failed\n");
		return ENOMEM;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*========================================================================*
 *				map_remap				  *
 *========================================================================*/
int map_remap(struct vmproc *dvmp, vir_bytes da, size_t size,
		struct vir_region *region, vir_bytes *r, int readonly)
{
	struct vir_region *vr;
	struct phys_region *ph;
	vir_bytes startv, dst_addr;
	physr_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	assert(region->flags & VR_SHARED);

	/* da is handled differently */
	if (!da)
		dst_addr = 0;
	else
		dst_addr = da;

	/* round up to page size */
	assert(!(size % VM_PAGE_SIZE));
	startv = region_find_slot(dvmp, dst_addr, VM_DATATOP, size);
	if (startv == SLOT_FAIL) {
		return ENOMEM;
	}
	/* when the user specifies the address, we cannot change it */
	if (da && (startv != dst_addr))
		return EINVAL;

	vr = map_copy_region(dvmp, region);
	if(!vr)
		return ENOMEM;

	USE(vr,
	vr->vaddr = startv;
	vr->length = size;
	vr->flags = region->flags;
	vr->tag = VRT_NONE;
	vr->parent = dvmp;
	if(readonly) {
		vr->flags &= ~VR_WRITABLE;
	}
	);
	assert(vr->flags & VR_SHARED);

	region_insert(&dvmp->vm_regions_avl, vr);

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		if(map_ph_writept(dvmp, vr, ph) != OK) {
			panic("map_remap: map_ph_writept failed");
		}
		physr_incr_iter(&iter);
	}

	*r = startv;

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*========================================================================*
 *				map_get_phys				  *
 *========================================================================*/
int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
{
	struct vir_region *vr;
	struct phys_region *ph;
	physr_iter iter;

	if (!(vr = map_lookup(vmp, addr)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!(vr->flags & VR_SHARED))
		return EINVAL;

	physr_start_iter_least(vr->phys, &iter);
	ph = physr_get_iter(&iter);

	assert(ph);
	assert(ph->ph);
	if (r)
		*r = ph->ph->phys;

	return OK;
}
/*========================================================================*
 *				map_get_ref				  *
 *========================================================================*/
int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
{
	struct vir_region *vr;
	struct phys_region *ph;
	physr_iter iter;

	if (!(vr = map_lookup(vmp, addr)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!(vr->flags & VR_SHARED))
		return EINVAL;

	physr_start_iter_least(vr->phys, &iter);
	ph = physr_get_iter(&iter);

	assert(ph);
	assert(ph->ph);
	if (cnt)
		*cnt = ph->ph->refcount;

	return OK;
}
/*========================================================================*
 *				get_stats_info				  *
 *========================================================================*/
void get_stats_info(struct vm_stats_info *vsi)
{
	yielded_t *yb;

	vsi->vsi_cached = 0L;

	for(yb = lru_youngest; yb; yb = yb->older)
		vsi->vsi_cached++;
}
/*========================================================================*
 *				get_usage_info				  *
 *========================================================================*/
void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
{
	struct vir_region *vr;
	physr_iter iter;
	struct phys_region *ph;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	memset(vui, 0, sizeof(*vui));

	while((vr = region_get_iter(&v_iter))) {
		physr_start_iter_least(vr->phys, &iter);
		while((ph = physr_get_iter(&iter))) {
			/* All present pages are counted towards the total. */
			vui->vui_total += VM_PAGE_SIZE;

			if (ph->ph->refcount > 1) {
				/* Any page with a refcount > 1 is common. */
				vui->vui_common += VM_PAGE_SIZE;

				/* Any common, non-COW page is shared. */
				if (vr->flags & VR_SHARED ||
					ph->ph->share_flag == PBSH_SMAP)
					vui->vui_shared += VM_PAGE_SIZE;
			}
			physr_incr_iter(&iter);
		}
		region_incr_iter(&v_iter);
	}
}
/*===========================================================================*
 *				get_region_info				     *
 *===========================================================================*/
int get_region_info(struct vmproc *vmp, struct vm_region_info *vri,
	int max, vir_bytes *nextp)
{
	struct vir_region *vr;
	vir_bytes next;
	int count;
	region_iter v_iter;

	next = *nextp;

	if (!max) return 0;

	region_start_iter(&vmp->vm_regions_avl, &v_iter, next, AVL_GREATER_EQUAL);
	if(!(vr = region_get_iter(&v_iter))) return 0;

	for(count = 0; (vr = region_get_iter(&v_iter)) && count < max; count++, vri++) {
		struct phys_region *ph1, *ph2;

		/* Report part of the region that's actually in use. */

		/* Get first and last phys_regions, if any */
		ph1 = physr_search_least(vr->phys);
		ph2 = physr_search_greatest(vr->phys);
		if(!ph1 || !ph2) { assert(!ph1 && !ph2); continue; }

		/* Report start+length of region starting from lowest use. */
		vri->vri_addr = vr->vaddr + ph1->offset;
		vri->vri_prot = 0;
		vri->vri_length = ph2->offset + VM_PAGE_SIZE - ph1->offset;

		/* "AND" the provided protection with per-page protection. */
		if (!(vr->flags & VR_WRITABLE))
			vri->vri_prot &= ~PROT_WRITE;

		vri->vri_flags = (vr->flags & VR_SHARED) ? MAP_IPC_SHARED : 0;

		next = vr->vaddr + vr->length;
		region_incr_iter(&v_iter);
	}

	*nextp = next;
	return count;
}
/*========================================================================*
 *				printregionstats			  *
 *========================================================================*/
void printregionstats(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *pr;
	physr_iter iter;
	vir_bytes used = 0, weighted = 0;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		region_incr_iter(&v_iter);
		if(vr->flags & VR_DIRECT)
			continue;
		physr_start_iter_least(vr->phys, &iter);
		while((pr = physr_get_iter(&iter))) {
			physr_incr_iter(&iter);
			used += VM_PAGE_SIZE;
			weighted += VM_PAGE_SIZE / pr->ph->refcount;
		}
	}

	printf("%6lukB %6lukB\n", used/1024, weighted/1024);

	return;
}
/*===========================================================================*
 *				do_map_memory				     *
 *===========================================================================*/
static int do_map_memory(struct vmproc *vms, struct vmproc *vmd,
	struct vir_region *vrs, struct vir_region *vrd,
	vir_bytes offset_s, vir_bytes offset_d,
	vir_bytes length, int flag)
{
	struct phys_region *prs;
	struct phys_region *newphysr;
	struct phys_block *pb;
	physr_iter iter;
	u32_t pt_flag = PTF_PRESENT | PTF_USER;
	vir_bytes end;

	/* Search for the first phys region in the source process. */
	physr_start_iter(vrs->phys, &iter, offset_s, AVL_EQUAL);
	prs = physr_get_iter(&iter);
	if(!prs)
		panic("do_map_memory: no aligned phys region: %d", 0);

	/* flag: 0 -> read-only
	 *       1 -> writable
	 *      -1 -> share as COW, so read-only
	 */
	if(flag > 0)
		pt_flag |= PTF_WRITE;
	else
		pt_flag |= PTF_READ;

	/* Map phys blocks in the source process to the destination process. */
	end = offset_d + length;
	while((prs = physr_get_iter(&iter)) && offset_d < end) {
		/* If a SMAP share was requested but the phys block has
		 * already been shared as COW, copy the block for the
		 * source phys region first.
		 */
		pb = prs->ph;
		if(flag >= 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_COW) {
			if(!(prs = map_clone_ph_block(vms, vrs, prs, &iter)))
				return ENOMEM;
			pb = prs->ph;
		}

		/* Allocate a new phys region. */
		if(!(newphysr = pb_reference(pb, offset_d, vrd)))
			return ENOMEM;

		/* If a COW share was requested but the phys block has
		 * already been shared as SMAP, give up on COW and copy the
		 * block for the destination phys region now.
		 */
		if(flag < 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_SMAP) {
			if(!(newphysr = map_clone_ph_block(vmd, vrd,
				newphysr, NULL))) {
				return ENOMEM;
			}
		}
		else {
			/* See if this is a COW share or SMAP share. */
			if(flag < 0) {			/* COW share */
				pb->share_flag = PBSH_COW;
				/* Update the page table for the src process. */
				pt_writemap(vms, &vms->vm_pt, offset_s + vrs->vaddr,
					pb->phys, VM_PAGE_SIZE,
					pt_flag, WMF_OVERWRITE);
			}
			else {				/* SMAP share */
				pb->share_flag = PBSH_SMAP;
			}
			/* Update the page table for the destination process. */
			pt_writemap(vmd, &vmd->vm_pt, offset_d + vrd->vaddr,
				pb->phys, VM_PAGE_SIZE, pt_flag, WMF_OVERWRITE);
		}

		physr_incr_iter(&iter);
		offset_d += VM_PAGE_SIZE;
		offset_s += VM_PAGE_SIZE;
	}
	return OK;
}
/*===========================================================================*
 *				unmap_memory				     *
 *===========================================================================*/
int unmap_memory(endpoint_t sour, endpoint_t dest,
	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
{
	struct vmproc *vmd;
	struct vir_region *vrd;
	struct phys_region *pr;
	struct phys_block *pb;
	physr_iter iter;
	vir_bytes off, end;
	int p;

	/* Use information on the destination process to unmap. */
	if(vm_isokendpt(dest, &p) != OK)
		panic("unmap_memory: bad endpoint: %d", dest);
	vmd = &vmproc[p];

	vrd = map_lookup(vmd, virt_d);
	assert(vrd);

	/* Search for the first phys region in the destination process. */
	off = virt_d - vrd->vaddr;
	physr_start_iter(vrd->phys, &iter, off, AVL_EQUAL);
	pr = physr_get_iter(&iter);
	if(!pr)
		panic("unmap_memory: no aligned phys region: %d", 0);

	/* Copy the phys block now rather than doing COW. */
	end = off + length;
	while((pr = physr_get_iter(&iter)) && off < end) {
		pb = pr->ph;
		assert(pb->refcount > 1);
		assert(pb->share_flag == PBSH_SMAP);

		if(!(pr = map_clone_ph_block(vmd, vrd, pr, &iter)))
			return ENOMEM;

		physr_incr_iter(&iter);
		off += VM_PAGE_SIZE;
	}

	return OK;
}
/*===========================================================================*
 *				rm_phys_regions				     *
 *===========================================================================*/
static void rm_phys_regions(struct vir_region *region,
	vir_bytes begin, vir_bytes length)
{
	/* Remove all phys regions between @begin and @begin+length.
	 *
	 * Don't update the page table, because we will update it at
	 * map_memory() later.
	 */
	struct phys_region *pr;
	physr_iter iter;

	physr_start_iter(region->phys, &iter, begin, AVL_GREATER_EQUAL);
	while((pr = physr_get_iter(&iter)) && pr->offset < begin + length) {
		pb_unreferenced(region, pr);
		physr_start_iter(region->phys, &iter, begin,
			AVL_GREATER_EQUAL);
		SLABFREE(pr);
	}
}
2114 /*===========================================================================*
2115 * map_memory *
2116 *===========================================================================*/
int map_memory(endpoint_t sour, endpoint_t dest,
	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
{
/* This is the entry point. This function will be called by handle_memory()
 * when VM receives a map-memory request.
 */
	struct vmproc *vms, *vmd;
	struct vir_region *vrs, *vrd;
	vir_bytes offset_s, offset_d;
	int p;
	int r;

	if(vm_isokendpt(sour, &p) != OK)
		panic("map_memory: bad endpoint: %d", sour);
	vms = &vmproc[p];
	if(vm_isokendpt(dest, &p) != OK)
		panic("map_memory: bad endpoint: %d", dest);
	vmd = &vmproc[p];

	vrs = map_lookup(vms, virt_s);
	assert(vrs);
	vrd = map_lookup(vmd, virt_d);
	assert(vrd);

	/* Linear address -> offset from start of vir region. */
	offset_s = virt_s - vrs->vaddr;
	offset_d = virt_d - vrd->vaddr;

	/* Make sure that the range in the source process has been mapped
	 * to physical memory.
	 */
	map_handle_memory(vms, vrs, offset_s, length, 0);

	/* Prepare the destination: drop any phys regions currently mapped
	 * in the target range.
	 */
	rm_phys_regions(vrd, offset_d, length);

	/* Map memory. */
	r = do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, flag);

	return r;
}
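/* The flow above, summarized (descriptive sketch): resolve both endpoints,
 * translate the linear addresses to region offsets, fault in the source
 * range, clear the destination range, then share block by block:
 *
 *	map_memory(sour, dest, virt_s, virt_d, length, flag)
 *	  -> map_handle_memory(vms, vrs, offset_s, length, 0)
 *	  -> rm_phys_regions(vrd, offset_d, length)
 *	  -> do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, flag)
 */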
/*===========================================================================*
 *				get_clean_phys_region			     *
 *===========================================================================*/
static struct phys_region *
get_clean_phys_region(struct vmproc *vmp, vir_bytes vaddr, struct vir_region **ret_region)
{
	struct vir_region *region;
	vir_bytes regionoffset, mapaddr;
	struct phys_region *ph;

	mapaddr = vaddr;

	if(!(region = map_lookup(vmp, mapaddr))) {
		printf("VM: get_clean_phys_region: 0x%lx not found\n", vaddr);
		return NULL;
	}

	if(!(region->flags & VR_ANON)) {
		printf("VM: get_clean_phys_region: non-anon 0x%lx\n", vaddr);
		return NULL;
	}

	assert(mapaddr >= region->vaddr);
	assert(mapaddr < region->vaddr + region->length);

	regionoffset = mapaddr - region->vaddr;

	/* For now, only support the yielding of blocks that are
	 * exactly a mapped phys_region. Go get that phys_region.
	 * (This can be improved without changing the interface.)
	 */
	if(!(ph = physr_search(region->phys, regionoffset,
		AVL_EQUAL))) {
		printf("VM: get_clean_phys_region: exact block not found\n");
		return NULL;
	}

	/* Make sure this is what we asked for. */
	assert(ph->offset == regionoffset);

	/* If it's mapped more than once, make a copy. */
	assert(ph->ph->refcount > 0);
	if(ph->ph->refcount > 1) {
		if(!(ph = map_clone_ph_block(vmp, region,
			ph, NULL))) {
			printf("VM: get_clean_phys_region: ph copy failed\n");
			return NULL;
		}
	}

	assert(ph->ph->refcount == 1);

	*ret_region = region;

	return ph;
}
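/* Sketch, excluded from the build: the post-conditions a successful caller
 * can rely on (both are asserted above).
 */
#if 0
	struct vir_region *region;
	struct phys_region *ph;

	if((ph = get_clean_phys_region(vmp, vaddr, &region)) != NULL) {
		assert(ph->offset == vaddr - region->vaddr);
		assert(ph->ph->refcount == 1);	/* private: safe to swap */
	}
#endif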
static int getblock(struct vmproc *vmp, u64_t id, vir_bytes vaddr)
{
	yielded_t *yb;
	struct phys_region *ph;
	struct vir_region *region;
	yielded_avl *avl;
	block_id_t blockid;

	/* Try to get the yielded block. */
	blockid.owner = vmp->vm_endpoint;
	blockid.id = id;
	avl = get_yielded_avl(blockid);
	if(!(yb = yielded_search(avl, blockid, AVL_EQUAL))) {
		return ESRCH;
	}

	/* Get the intended phys region, make sure refcount is 1. */
	if(!(ph = get_clean_phys_region(vmp, vaddr, &region))) {
		printf("VM: getblock: not found for %d\n", vmp->vm_endpoint);
		return EINVAL;
	}

	assert(ph->ph->refcount == 1);

	/* Free the block that is currently there. */
	free_mem(ABS2CLICK(ph->ph->phys), 1);

	/* Set the phys block to the new address and update the pagetable. */
	USE(ph->ph, ph->ph->phys = yb->addr;);
	if(map_ph_writept(vmp, region, ph) != OK) {
		/* Presumably it was mapped, so there is no reason
		 * updating should fail.
		 */
		panic("do_get_block: couldn't write pt");
	}

	/* Forget about the yielded block and free the struct. */
	freeyieldednode(yb, 0);

	return OK;
}
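/* In short: getblock() swaps the frame currently mapped at vaddr for the
 * previously yielded frame, so the yielded contents reappear in place.
 * ESRCH means no block with this id was yielded; EINVAL means vaddr did
 * not resolve to exactly one private phys_region.
 */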
static int yieldblock(struct vmproc *vmp, u64_t id,
	vir_bytes vaddr, yielded_t **retyb)
{
	yielded_t *newyb;
	vir_bytes mem_clicks, clicks;
	struct vir_region *region;
	struct phys_region *ph;
	yielded_avl *avl;
	block_id_t blockid;

	/* Makes no sense if the yielded block ID already exists; that
	 * is likely a serious bug in the caller.
	 */
	blockid.id = id;
	blockid.owner = vmp->vm_endpoint;
	avl = get_yielded_avl(blockid);
	if(yielded_search(avl, blockid, AVL_EQUAL)) {
		printf("!");
		return EINVAL;
	}

	if(vaddr % VM_PAGE_SIZE) return EFAULT;

	if(!(ph = get_clean_phys_region(vmp, vaddr, &region))) {
		printf("VM: do_yield_block: not found for %d\n",
			vmp->vm_endpoint);
		return EINVAL;
	}

	/* Make a new block to record the yielding in. */
	if(!SLABALLOC(newyb)) {
		return ENOMEM;
	}

	assert(!(ph->ph->phys % VM_PAGE_SIZE));

	clicks = 1;
	if((mem_clicks = alloc_mem(clicks, PAF_CLEAR)) == NO_MEM) {
		SLABFREE(newyb);
		return ENOMEM;
	}

	/* Update yielded block info. */
	USE(newyb,
		newyb->id = blockid;
		newyb->addr = ph->ph->phys;
		newyb->younger = NULL;);

	/* Set the phys block to the new address and update the pagetable. */
	USE(ph->ph,
		ph->ph->phys = CLICK2ABS(mem_clicks););
	if(map_ph_writept(vmp, region, ph) != OK) {
		/* Presumably it was mapped, so there is no reason
		 * updating should fail.
		 */
		panic("yield_block: couldn't write pt");
	}

	/* Remember the yielded block. */
	yielded_insert(avl, newyb);
	vmp->vm_yielded++;

	/* Add it to the LRU list too; it is the youngest block. */
	LRUCHECK;

	if(lru_youngest) {
		USE(lru_youngest,
			lru_youngest->younger = newyb;);
	} else {
		lru_oldest = newyb;
	}

	USE(newyb,
		newyb->older = lru_youngest;);

	lru_youngest = newyb;

	LRUCHECK;

	if(retyb)
		*retyb = newyb;

	return OK;
}
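/* Illustrative sketch, excluded from the build: a yield/get round trip as
 * performed on behalf of one process. The id value is arbitrary; vaddr
 * must be page-aligned and correspond to exactly one mapped phys_region.
 */
#if 0
	yielded_t *yb;
	u64_t id = make64(42, 0);	/* arbitrary example id */

	if(yieldblock(vmp, id, vaddr, &yb) == OK) {
		/* The frame at vaddr was replaced by a fresh, cleared one;
		 * the old contents live on only in the yielded block.
		 */
		if(getblock(vmp, id, vaddr) == OK) {
			/* The original frame is mapped back at vaddr. */
		}
	}
#endif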
/*===========================================================================*
 *				do_forgetblocks				     *
 *===========================================================================*/
int do_forgetblocks(message *m)
{
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_forgetblocks: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	free_yielded_proc(vmp);

	return OK;
}
/*===========================================================================*
 *				do_forgetblock				     *
 *===========================================================================*/
int do_forgetblock(message *m)
{
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;
	yielded_t *yb;
	u64_t id;
	block_id_t blockid;
	yielded_avl *avl;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_forgetblock: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	id = make64(m->VMFB_IDLO, m->VMFB_IDHI);

	blockid.id = id;
	blockid.owner = vmp->vm_endpoint;
	avl = get_yielded_avl(blockid);
	if((yb = yielded_search(avl, blockid, AVL_EQUAL))) {
		freeyieldednode(yb, 1);
	}

	return OK;
}
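/* Sketch, excluded from the build: how a sender would encode the 64-bit
 * block id into the two 32-bit message fields decoded above (assuming the
 * ex64lo()/ex64hi() accessors as the counterparts of make64()).
 */
#if 0
	message m;
	u64_t id = make64(0x12345678, 0);	/* arbitrary example id */

	m.VMFB_IDLO = ex64lo(id);
	m.VMFB_IDHI = ex64hi(id);
#endif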
/*===========================================================================*
 *				do_yieldblockgetblock			     *
 *===========================================================================*/
int do_yieldblockgetblock(message *m)
{
	u64_t yieldid, getid;
	int n;
	endpoint_t caller = m->m_source;
	struct vmproc *vmp;
	yielded_t *yb = NULL;
	int r = ESRCH;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yieldblockgetblock: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	if(m->VMYBGB_LEN != VM_PAGE_SIZE) {
		static int printed = 0;
		if(!printed) {
			printed = 1;
			printf("vm: secondary cache for non-page-sized blocks temporarily disabled\n");
		}
		return ENOSYS;
	}

	yieldid = make64(m->VMYBGB_YIELDIDLO, m->VMYBGB_YIELDIDHI);
	getid = make64(m->VMYBGB_GETIDLO, m->VMYBGB_GETIDHI);

	if(cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to yield. */
		yieldblock(vmp, yieldid, (vir_bytes) m->VMYBGB_VADDR, &yb);
	}

	if(cmp64(getid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to get. */
		r = getblock(vmp, getid, (vir_bytes) m->VMYBGB_VADDR);
	}

	return r;
}
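/* Sketch, excluded from the build: assembling a combined yield-and-get
 * request as decoded above. The exact field types depend on the message
 * layout; the VMYBGB_VADDR cast mirrors the decoding in
 * do_yieldblockgetblock(), and ex64lo()/ex64hi() are assumed as the
 * counterparts of make64(). Pass VM_BLOCKID_NONE for an unused half.
 */
#if 0
	message m;
	u64_t yieldid = make64(1, 0), getid = make64(2, 0); /* example ids */

	m.VMYBGB_LEN = VM_PAGE_SIZE;	/* only page-sized blocks for now */
	m.VMYBGB_VADDR = (char *) vaddr;
	m.VMYBGB_YIELDIDLO = ex64lo(yieldid);
	m.VMYBGB_YIELDIDHI = ex64hi(yieldid);
	m.VMYBGB_GETIDLO = ex64lo(getid);
	m.VMYBGB_GETIDHI = ex64hi(getid);
	/* ... send to VM ... */
#endif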
void map_setparent(struct vmproc *vmp)
{
	region_iter iter;
	struct vir_region *vr;
	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		USE(vr, vr->parent = vmp;);
		region_incr_iter(&iter);
	}
}