some coverity fixes.
[minix.git] / servers / vm / region.c
blob 9ed75d779820452d8df4fa5104bc6def1d6df84e
#define _SYSTEM 1

#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>
#include <minix/hash.h>

#include <sys/mman.h>

#include <limits.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <stdint.h>
#include <memory.h>
#include <sys/param.h>

#include "vm.h"
#include "proto.h"
#include "util.h"
#include "glo.h"
#include "region.h"
#include "sanitycheck.h"
#include "physravl.h"
#include "memlist.h"
/* LRU list. */
static yielded_t *lru_youngest = NULL, *lru_oldest = NULL;

/* Should a physblock be mapped writable? */
#define WRITABLE(r, pb) \
	(((r)->flags & VR_WRITABLE) && \
		(((r)->flags & (VR_DIRECT | VR_SHARED)) || \
		 (pb)->refcount == 1))
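
/* Note: an anonymous block is only mapped writable while its refcount
 * is 1; once it is shared between address spaces it is mapped read-only,
 * so the resulting write fault is what triggers copy-on-write in
 * map_pf() below. Direct and shared regions bypass the refcount test,
 * as they are not subject to copy-on-write.
 */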
static int map_new_physblock(struct vmproc *vmp, struct vir_region
	*region, vir_bytes offset, vir_bytes length, phys_bytes what, u32_t
	allocflags, int written);

static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr);

static phys_bytes freeyieldednode(yielded_t *node, int freemem);

static struct vir_region *map_copy_region(struct vmproc *vmp, struct
	vir_region *vr);

static struct phys_region *map_clone_ph_block(struct vmproc *vmp,
	struct vir_region *region, struct phys_region *ph, physr_iter *iter);

#if SANITYCHECKS
static void lrucheck(void);
#endif

/* hash table of yielded blocks */
#define YIELD_HASHSIZE 65536
static yielded_avl vm_yielded_blocks[YIELD_HASHSIZE];

static int avl_inited = 0;
void map_region_init(void)
{
	int h;
	assert(!avl_inited);
	for(h = 0; h < YIELD_HASHSIZE; h++)
		yielded_init(&vm_yielded_blocks[h]);
	avl_inited = 1;
}

static yielded_avl *get_yielded_avl(block_id_t id)
{
	u32_t h;

	assert(avl_inited);

	hash_i_64(id.owner, id.id, h);
	h = h % YIELD_HASHSIZE;

	assert(h >= 0);
	assert(h < YIELD_HASHSIZE);

	return &vm_yielded_blocks[h];
}
static char *map_name(struct vir_region *vr)
{
	static char name[100];
	char *typename, *tag;
	int type = vr->flags & (VR_ANON|VR_DIRECT);
	switch(type) {
		case VR_ANON:
			typename = "anonymous";
			break;
		case VR_DIRECT:
			typename = "direct";
			break;
		default:
			panic("unknown mapping type: %d", type);
	}

	switch(vr->tag) {
		case VRT_TEXT:
			tag = "text";
			break;
		case VRT_STACK:
			tag = "stack";
			break;
		case VRT_HEAP:
			tag = "heap";
			break;
		case VRT_NONE:
			tag = "untagged";
			break;
		default:
			tag = "unknown tag value";
			break;
	}

	sprintf(name, "%s, %s", typename, tag);

	return name;
}
void map_printregion(struct vmproc *vmp, struct vir_region *vr)
{
	physr_iter iter;
	struct phys_region *ph;
	printf("map_printmap: map_name: %s\n", map_name(vr));
	printf("\t%lx (len 0x%lx, %lukB), %p\n",
		vr->vaddr, vr->length, vr->length/1024, map_name(vr));
	printf("\t\tphysblocks:\n");
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		printf("\t\t@ %lx (refs %d): phys 0x%lx\n",
			(vr->vaddr + ph->offset),
			ph->ph->refcount, ph->ph->phys);
		physr_incr_iter(&iter);
	}
}
/*===========================================================================*
 *				map_printmap				     *
 *===========================================================================*/
void map_printmap(vmp)
struct vmproc *vmp;
{
	struct vir_region *vr;
	region_iter iter;

	printf("memory regions in process %d:\n", vmp->vm_endpoint);

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		map_printregion(vmp, vr);
		region_incr_iter(&iter);
	}
}
static struct vir_region *getnextvr(struct vir_region *vr)
{
	struct vir_region *nextvr;
	region_iter v_iter;
	SLABSANE(vr);
	region_start_iter(&vr->parent->vm_regions_avl, &v_iter, vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter));
	assert(region_get_iter(&v_iter) == vr);
	region_incr_iter(&v_iter);
	nextvr = region_get_iter(&v_iter);
	if(!nextvr) return NULL;
	SLABSANE(nextvr);
	assert(vr->parent == nextvr->parent);
	assert(vr->vaddr < nextvr->vaddr);
	assert(vr->vaddr + vr->length <= nextvr->vaddr);
	return nextvr;
}
#if SANITYCHECKS

/*===========================================================================*
 *				map_sanitycheck_pt			     *
 *===========================================================================*/
static int map_sanitycheck_pt(struct vmproc *vmp,
	struct vir_region *vr, struct phys_region *pr)
{
	struct phys_block *pb = pr->ph;
	int rw;
	int r;

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	r = pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
	  pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw, WMF_VERIFY);

	if(r != OK) {
		printf("proc %d phys_region 0x%lx sanity check failed\n",
			vmp->vm_endpoint, pr->offset);
		map_printregion(vmp, vr);
	}

	return r;
}

/*===========================================================================*
 *				map_sanitycheck				     *
 *===========================================================================*/
void map_sanitycheck(char *file, int line)
{
	struct vmproc *vmp;

	lrucheck();

	/* Macro for looping over all physical blocks of all regions of
	 * all processes.
	 */
#define ALLREGIONS(regioncode, physcode)				\
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) {		\
		region_iter v_iter;					\
		struct vir_region *vr;					\
		if(!(vmp->vm_flags & VMF_INUSE))			\
			continue;					\
		region_start_iter_least(&vmp->vm_regions_avl, &v_iter);	\
		while((vr = region_get_iter(&v_iter))) {		\
			physr_iter iter;				\
			struct phys_region *pr;				\
			regioncode;					\
			physr_start_iter_least(vr->phys, &iter);	\
			while((pr = physr_get_iter(&iter))) {		\
				physcode;				\
				physr_incr_iter(&iter);			\
			}						\
			region_incr_iter(&v_iter);			\
		}							\
	}

#define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
	/* Basic pointers check. */
	ALLREGIONS(MYSLABSANE(vr),MYSLABSANE(pr); MYSLABSANE(pr->ph);MYSLABSANE(pr->parent));
	ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr->parent == vr););

	/* Do counting for consistency check. */
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
		if(pr->ph->seencount == 1) {
			if(!(pr->parent->flags & VR_DIRECT)) {
				MYASSERT(usedpages_add(pr->ph->phys,
					VM_PAGE_SIZE) == OK);
			}
		}
	);

	/* Do consistency check. */
	ALLREGIONS({ struct vir_region *nextvr = getnextvr(vr);
		if(nextvr) {
			MYASSERT(vr->vaddr < nextvr->vaddr);
			MYASSERT(vr->vaddr + vr->length <= nextvr->vaddr);
		}
		}
		MYASSERT(!(vr->vaddr % VM_PAGE_SIZE));,
		if(pr->ph->refcount != pr->ph->seencount) {
			map_printmap(vmp);
			printf("ph in vr %p: 0x%lx refcount %u "
				"but seencount %u\n",
				vr, pr->offset,
				pr->ph->refcount, pr->ph->seencount);
		}
		{
			int n_others = 0;
			struct phys_region *others;
			if(pr->ph->refcount > 0) {
				MYASSERT(pr->ph->firstregion);
				if(pr->ph->refcount == 1) {
					MYASSERT(pr->ph->firstregion == pr);
				}
			} else {
				MYASSERT(!pr->ph->firstregion);
			}
			for(others = pr->ph->firstregion; others;
				others = others->next_ph_list) {
				MYSLABSANE(others);
				MYASSERT(others->ph == pr->ph);
				n_others++;
			}
			MYASSERT(pr->ph->refcount == n_others);
		}
		MYASSERT(pr->ph->refcount == pr->ph->seencount);
		MYASSERT(!(pr->offset % VM_PAGE_SIZE)););
	ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp, vr, pr) == OK));
}

#define LRUCHECK lrucheck()

static void lrucheck(void)
{
	yielded_t *list;

	/* list is empty and ok if both ends point to null. */
	if(!lru_youngest && !lru_oldest)
		return;

	/* if not, both should point to something. */
	SLABSANE(lru_youngest);
	SLABSANE(lru_oldest);

	assert(!lru_youngest->younger);
	assert(!lru_oldest->older);

	for(list = lru_youngest; list; list = list->older) {
		SLABSANE(list);
		if(list->younger) {
			SLABSANE(list->younger);
			assert(list->younger->older == list);
		} else	assert(list == lru_youngest);
		if(list->older) {
			SLABSANE(list->older);
			assert(list->older->younger == list);
		} else	assert(list == lru_oldest);
	}
}

void blockstats(void)
{
	yielded_t *list;
	int blocks = 0;
	phys_bytes mem = 0;
	clock_t ticks;
	int s;

	s = getuptime(&ticks);

	assert(s == OK);

	LRUCHECK;

	for(list = lru_youngest; list; list = list->older) {
		mem += VM_PAGE_SIZE;
		blocks++;
	}

	if(blocks > 0)
		printf("%d blocks, %lukB; ", blocks, mem/1024);

	printmemstats();
}
#else
#define LRUCHECK
#endif
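
/* Note on map_ph_writept() below: under SANITYCHECKS, a block that has
 * not yet been written through this function is mapped without
 * WMF_OVERWRITE, presumably so that pt_writemap() can catch an
 * unexpected pre-existing mapping for a supposedly fresh page.
 */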
/*=========================================================================*
 *				map_ph_writept				   *
 *=========================================================================*/
static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr)
{
	int rw;
	struct phys_block *pb = pr->ph;

	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(pr->offset % VM_PAGE_SIZE));
	assert(pb->refcount > 0);

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	if(pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
	  pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw,
#if SANITYCHECKS
	  !pr->written ? 0 :
#endif
	  WMF_OVERWRITE) != OK) {
		printf("VM: map_writept: pt_writemap failed\n");
		return ENOMEM;
	}

#if SANITYCHECKS
	USE(pr, pr->written = 1;);
#endif

	return OK;
}
#define SLOT_FAIL ((vir_bytes) -1)

/*===========================================================================*
 *				region_find_slot_range			     *
 *===========================================================================*/
static vir_bytes region_find_slot_range(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	struct vir_region *lastregion;
	vir_bytes startv = 0;
	int foundflag = 0;
	region_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Length must be reasonable. */
	assert(length > 0);

	/* Special case: allow caller to set maxv to 0 meaning 'I want
	 * it to be mapped in right here.'
	 */
	if(maxv == 0) {
		maxv = minv + length;

		/* Sanity check. */
		if(maxv <= minv) {
			printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
				minv, length);
			return SLOT_FAIL;
		}
	}

	/* Basic input sanity checks. */
	assert(!(length % VM_PAGE_SIZE));
	if(minv >= maxv) {
		printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
			minv, maxv, length);
	}
	assert(minv < maxv);

	if(minv + length > maxv)
		return SLOT_FAIL;

#define FREEVRANGE_TRY(rangestart, rangeend) {			\
	vir_bytes frstart = (rangestart), frend = (rangeend);	\
	frstart = MAX(frstart, minv);				\
	frend = MIN(frend, maxv);				\
	if(frend > frstart && (frend - frstart) >= length) {	\
		startv = frend-length;				\
		foundflag = 1;					\
	} }

#define FREEVRANGE(start, end) {					\
	assert(!foundflag);						\
	FREEVRANGE_TRY(((start)+VM_PAGE_SIZE), ((end)-VM_PAGE_SIZE));	\
	if(!foundflag) {						\
		FREEVRANGE_TRY((start), (end));				\
	} }
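
	/* Each candidate gap is tried twice: first shrunk by one page on
	 * either side (presumably to leave guard pages around the
	 * mapping), and only if that fails, the full gap. FREEVRANGE_TRY
	 * picks the highest fitting address (frend - length), so slots
	 * are handed out top-down within a gap.
	 */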
	/* find region after maxv. */
	region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_GREATER_EQUAL);
	lastregion = region_get_iter(&iter);

	if(!lastregion) {
		/* This is the free virtual address space after the last region. */
		region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_LESS);
		lastregion = region_get_iter(&iter);
		FREEVRANGE(lastregion ?
			lastregion->vaddr+lastregion->length : 0, VM_DATATOP);
	}

	if(!foundflag) {
		struct vir_region *vr;
		while((vr = region_get_iter(&iter)) && !foundflag) {
			struct vir_region *nextvr;
			region_decr_iter(&iter);
			nextvr = region_get_iter(&iter);
			FREEVRANGE(nextvr ? nextvr->vaddr+nextvr->length : 0,
				vr->vaddr);
		}
	}

	if(!foundflag) {
		printf("VM: region_find_slot: no 0x%lx bytes found for %d between 0x%lx and 0x%lx\n",
			length, vmp->vm_endpoint, minv, maxv);
		util_stacktrace();
		return SLOT_FAIL;
	}

	/* However we got it, startv must be in the requested range. */
	assert(startv >= minv);
	assert(startv < maxv);
	assert(startv + length <= maxv);

	/* remember this position as a hint for next time. */
	vmp->vm_region_top = startv + length;

	return startv;
}
/*===========================================================================*
 *				region_find_slot			     *
 *===========================================================================*/
static vir_bytes region_find_slot(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	vir_bytes v, hint = vmp->vm_region_top;

	/* use the top of the last inserted region as a minv hint if
	 * possible. remember that a zero maxv is a special case.
	 */
	if(maxv && hint < maxv && hint >= minv) {
		v = region_find_slot_range(vmp, minv, hint, length);

		if(v != SLOT_FAIL)
			return v;
	}

	return region_find_slot_range(vmp, minv, maxv, length);
}
struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv, vir_bytes length, int flags)
{
	physr_avl *phavl;
	struct vir_region *newregion;

	if(!(SLABALLOC(newregion))) {
		printf("vm: region_new: could not allocate\n");
		return NULL;
	}

	/* Fill in node details. */
	USE(newregion,
		newregion->vaddr = startv;
		newregion->length = length;
		newregion->flags = flags;
		newregion->tag = VRT_NONE;
		newregion->lower = newregion->higher = NULL;
		newregion->parent = vmp;);

	SLABALLOC(phavl);
	if(!phavl) {
		printf("VM: region_new: allocating phys avl failed\n");
		SLABFREE(newregion);
		return NULL;
	}
	USE(newregion, newregion->phys = phavl;);
	physr_init(newregion->phys);

	return newregion;
}
/*===========================================================================*
 *				map_page_region				     *
 *===========================================================================*/
struct vir_region *map_page_region(vmp, minv, maxv, length,
	what, flags, mapflags)
struct vmproc *vmp;
vir_bytes minv;
vir_bytes maxv;
vir_bytes length;
vir_bytes what;
u32_t flags;
int mapflags;
{
	struct vir_region *newregion;
	vir_bytes startv;

	assert(!(length % VM_PAGE_SIZE));

	SANITYCHECK(SCL_FUNCTIONS);

	if((flags & VR_CONTIG) && !(mapflags & MF_PREALLOC)) {
		printf("map_page_region: can't make contiguous allocation without preallocating\n");
		return NULL;
	}

	startv = region_find_slot(vmp, minv, maxv, length);
	if (startv == SLOT_FAIL)
		return NULL;

	/* Now we want a new region. */
	if(!(newregion = region_new(vmp, startv, length, flags))) {
		printf("VM: map_page_region: allocating region failed\n");
		return NULL;
	}

	/* If we know what we're going to map to, map it right away. */
	if(what != MAP_NONE) {
		assert(!(what % VM_PAGE_SIZE));
		assert(!(startv % VM_PAGE_SIZE));
		assert(!(mapflags & MF_PREALLOC));
		if(map_new_physblock(vmp, newregion, 0, length,
			what, PAF_CLEAR, 0) != OK) {
			printf("VM: map_new_physblock failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	if((flags & VR_ANON) && (mapflags & MF_PREALLOC)) {
		if(map_handle_memory(vmp, newregion, 0, length, 1) != OK) {
			printf("VM: map_page_region: prealloc failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	/* Pre-allocations should be uninitialized, but after that it's a
	 * different story.
	 */
	USE(newregion, newregion->flags &= ~VR_UNINITIALIZED;);

	/* Link it. */
	region_insert(&vmp->vm_regions_avl, newregion);

#if SANITYCHECKS
	assert(startv == newregion->vaddr);
	{
		struct vir_region *nextvr;
		if((nextvr = getnextvr(newregion))) {
			assert(newregion->vaddr < nextvr->vaddr);
		}
	}
#endif

	SANITYCHECK(SCL_FUNCTIONS);

	return newregion;
}
static struct phys_region *reset_physr_iter(struct vir_region *region,
	physr_iter *iter, vir_bytes offset)
{
	struct phys_region *ph;

	physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
	ph = physr_get_iter(iter);
	assert(ph);
	assert(ph->offset == offset);

	return ph;
}
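
/* map_subfree() below drops all phys_regions of the region that fall
 * inside [start, start+len) and frees their slab entries. When the
 * range covers the whole region, per-node tree removal is skipped and
 * the phys tree is simply reinitialized afterwards.
 */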
/*===========================================================================*
 *				map_subfree				     *
 *===========================================================================*/
static int map_subfree(struct vir_region *region,
	vir_bytes start, vir_bytes len)
{
	struct phys_region *pr;
	physr_iter iter;
	vir_bytes end = start+len;

	int full = 0;

#if SANITYCHECKS
	{
	SLABSANE(region);
	SLABSANE(region->phys);
	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		struct phys_region *others;
		struct phys_block *pb;

		pb = pr->ph;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
		}
		physr_incr_iter(&iter);
	}
	}
#endif

	if(start == 0 && len == region->length)
		full = 1;

	physr_init_iter(&iter);
	physr_start_iter(region->phys, &iter, start, AVL_GREATER_EQUAL);
	while((pr = physr_get_iter(&iter))) {
		physr_incr_iter(&iter);
		if(pr->offset >= end)
			break;
		pb_unreferenced(region, pr, !full);
		if(!full) {
			physr_start_iter(region->phys, &iter,
				pr->offset, AVL_GREATER_EQUAL);
		}
		SLABFREE(pr);
	}

	if(full)
		physr_init(region->phys);

	return OK;
}
/*===========================================================================*
 *				map_free				     *
 *===========================================================================*/
static int map_free(struct vir_region *region)
{
	int r;

	if((r=map_subfree(region, 0, region->length)) != OK) {
		printf("%d\n", __LINE__);
		return r;
	}

	USE(region,
		SLABFREE(region->phys););
	SLABFREE(region);

	return OK;
}
/*===========================================================================*
 *				yielded_block_cmp			     *
 *===========================================================================*/
int yielded_block_cmp(struct block_id *id1, struct block_id *id2)
{
	if(id1->owner < id2->owner)
		return -1;
	if(id1->owner > id2->owner)
		return 1;
	return cmp64(id1->id, id2->id);
}
/*===========================================================================*
 *				free_yielded_proc			     *
 *===========================================================================*/
static vir_bytes free_yielded_proc(struct vmproc *vmp)
{
	vir_bytes total = 0;
	int h;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Free associated regions. */
	for(h = 0; h < YIELD_HASHSIZE && vmp->vm_yielded > 0; h++) {
		yielded_t *yb;
		yielded_iter iter;
		yielded_avl *avl = &vm_yielded_blocks[h];
		yielded_start_iter_least(avl, &iter);
		while((yb = yielded_get_iter(&iter))) {
			yielded_t *next_yb;
			SLABSANE(yb);
			yielded_incr_iter(&iter);
			if(yb->id.owner != vmp->vm_endpoint)
				continue;
			next_yb = yielded_get_iter(&iter);
			total += freeyieldednode(yb, 1);
			/* the above removal invalidated our iter; restart it
			 * for the node we want to start at.
			 */
			if(!next_yb) break;
			yielded_start_iter(avl, &iter, next_yb->id, AVL_EQUAL);
			assert(yielded_get_iter(&iter) == next_yb);
		}
	}

	return total;
}
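
/* Free one yielded block: unlink it from the LRU list and from its
 * owner's AVL tree, optionally return the memory itself to the free
 * pool, and release the node. The return value is the amount credited
 * to the caller as freed, one page.
 */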
static phys_bytes freeyieldednode(yielded_t *node, int freemem)
{
	yielded_t *older, *younger, *removed;
	yielded_avl *avl;
	int p;

	SLABSANE(node);

	LRUCHECK;

	/* Update LRU. */

	younger = node->younger;
	older = node->older;

	if(younger) {
		SLABSANE(younger);
		assert(younger->older == node);
		USE(younger, younger->older = node->older;);
	} else {
		assert(node == lru_youngest);
		lru_youngest = node->older;
	}

	if(older) {
		SLABSANE(older);
		assert(older->younger == node);
		USE(older, older->younger = node->younger;);
	} else {
		assert(node == lru_oldest);
		lru_oldest = node->younger;
	}

	LRUCHECK;

	/* Update AVL. */

	if(vm_isokendpt(node->id.owner, &p) != OK)
		panic("out of date owner of yielded block %d", node->id.owner);
	avl = get_yielded_avl(node->id);
	removed = yielded_remove(avl, node->id);
	assert(removed == node);
	assert(vmproc[p].vm_yielded > 0);
	vmproc[p].vm_yielded--;

	/* Free associated memory if requested. */

	if(freemem) {
		free_mem(ABS2CLICK(node->physaddr), node->pages);
	}

	/* Free node. */
	SLABFREE(node);

	return VM_PAGE_SIZE;
}
/*========================================================================*
 *				free_yielded				  *
 *========================================================================*/
vir_bytes free_yielded(vir_bytes max_bytes)
{

/* PRIVATE yielded_t *lru_youngest = NULL, *lru_oldest = NULL; */
	vir_bytes freed = 0;
	int blocks = 0;

	while(freed < max_bytes && lru_oldest) {
		SLABSANE(lru_oldest);
		freed += freeyieldednode(lru_oldest, 1);
		blocks++;
	}

	return freed;
}
/*========================================================================*
 *				map_free_proc				  *
 *========================================================================*/
int map_free_proc(vmp)
struct vmproc *vmp;
{
	struct vir_region *r;

	while((r = region_search_root(&vmp->vm_regions_avl))) {
		SANITYCHECK(SCL_DETAIL);
#if SANITYCHECKS
		nocheck++;
#endif
		region_remove(&vmp->vm_regions_avl, r->vaddr); /* For sanity checks. */
		map_free(r);
#if SANITYCHECKS
		nocheck--;
#endif
		SANITYCHECK(SCL_DETAIL);
	}

	region_init(&vmp->vm_regions_avl);

	/* Free associated yielded blocks. */
	free_yielded_proc(vmp);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*===========================================================================*
 *				map_lookup				     *
 *===========================================================================*/
struct vir_region *map_lookup(vmp, offset, physr)
struct vmproc *vmp;
vir_bytes offset;
struct phys_region **physr;
{
	struct vir_region *r;

	SANITYCHECK(SCL_FUNCTIONS);

#if SANITYCHECKS
	if(!region_search_root(&vmp->vm_regions_avl))
		panic("process has no regions: %d", vmp->vm_endpoint);
#endif

	if((r = region_search(&vmp->vm_regions_avl, offset, AVL_LESS_EQUAL))) {
		vir_bytes ph;
		if(offset >= r->vaddr && offset < r->vaddr + r->length) {
			ph = offset - r->vaddr;
			if(physr) {
				*physr = physr_search(r->phys, ph, AVL_EQUAL);
				assert((*physr)->offset == ph);
			}
			return r;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return NULL;
}
static u32_t vrallocflags(u32_t flags)
{
	u32_t allocflags = 0;

	if(flags & VR_PHYS64K)
		allocflags |= PAF_ALIGN64K;
	if(flags & VR_LOWER16MB)
		allocflags |= PAF_LOWER16MB;
	if(flags & VR_LOWER1MB)
		allocflags |= PAF_LOWER1MB;
	if(flags & VR_CONTIG)
		allocflags |= PAF_CONTIG;

	return allocflags;
}
/*===========================================================================*
 *				map_new_physblock			     *
 *===========================================================================*/
static int map_new_physblock(vmp, region, start_offset, length,
	what_mem, allocflags, written)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes start_offset;
vir_bytes length;
phys_bytes what_mem;
u32_t allocflags;
int written;
{
	struct memlist *memlist, *ml;
	int r;
	vir_bytes mapped = 0;
	vir_bytes offset = start_offset;

	SANITYCHECK(SCL_FUNCTIONS);

	assert(!(length % VM_PAGE_SIZE));

	if((region->flags & VR_CONTIG) &&
		(start_offset > 0 || length < region->length)) {
		printf("VM: region length 0x%lx, offset 0x%lx length 0x%lx\n",
			region->length, start_offset, length);
		map_printmap(vmp);
		printf("VM: map_new_physblock: non-full contig allocation requested\n");
		return EFAULT;
	}

	/* Memory for new physical block. */
	allocflags |= vrallocflags(region->flags);

	if(allocflags & PAF_CONTIG) {
		assert(what_mem == MAP_NONE);
		if((what_mem = alloc_mem(length/VM_PAGE_SIZE, allocflags)) == NO_MEM) {
			return ENOMEM;
		}
		what_mem = CLICK2ABS(what_mem);
		allocflags &= ~PAF_CONTIG;
		assert(what_mem != MAP_NONE);
	}

	if(!(memlist = alloc_mem_in_list(length, allocflags, what_mem))) {
		printf("map_new_physblock: couldn't allocate\n");
		return ENOMEM;
	}

	r = OK;

	for(ml = memlist; ml; ml = ml->next) {
		struct phys_region *newphysr = NULL;
		struct phys_block *newpb = NULL;

		/* Allocate things necessary for this chunk of memory. */
		if(!(newpb = pb_new(ml->phys)) ||
		   !(newphysr = pb_reference(newpb, offset, region))) {
			printf("map_new_physblock: no memory for the ph slabs\n");
			assert(!newphysr);
			if(newpb) SLABFREE(newpb);
			r = ENOMEM;
			break;
		}

		/* Update pagetable. */
		if(map_ph_writept(vmp, region, newphysr) != OK) {
			printf("map_new_physblock: map_ph_writept failed\n");
			r = ENOMEM;
			break;
		}

		offset += VM_PAGE_SIZE;
		mapped += VM_PAGE_SIZE;
	}

	if(r != OK) {
		offset = start_offset;
		/* Things did not go well. Undo everything. */
		for(ml = memlist; ml; ml = ml->next) {
			struct phys_region *physr;
			if((physr = physr_search(region->phys, offset,
				AVL_EQUAL))) {
				assert(physr->ph->refcount == 1);
				pb_unreferenced(region, physr, 1);
				SLABFREE(physr);
			}
			offset += VM_PAGE_SIZE;
		}
	} else assert(mapped == length);

	/* Always clean up the memlist itself, even if everything
	 * worked we're not using the memlist nodes any more. And
	 * the memory they reference is either freed above or in use.
	 */
	free_mem_list(memlist, 0);

	SANITYCHECK(SCL_FUNCTIONS);

	return r;
}
/*===========================================================================*
 *				map_clone_ph_block			     *
 *===========================================================================*/
static struct phys_region *map_clone_ph_block(vmp, region, ph, iter)
struct vmproc *vmp;
struct vir_region *region;
struct phys_region *ph;
physr_iter *iter;
{
	vir_bytes offset;
	u32_t allocflags;
	phys_bytes physaddr;
	struct phys_region *newpr;
	int region_has_single_block;
	int written = 0;
#if SANITYCHECKS
	written = ph->written;
#endif
	SANITYCHECK(SCL_FUNCTIONS);

	/* Warning: this function will free the passed
	 * phys_region *ph and replace it (in the same offset)
	 * with another! So both the pointer to it
	 * and any iterators over the phys_regions in the vir_region
	 * will be invalid on successful return. (Iterators over
	 * the vir_region could be invalid on unsuccessful return too.)
	 */

	/* This is only to be done if there is more than one copy. */
	assert(ph->ph->refcount > 1);

	/* This function takes a physical block, copies its contents
	 * into newly allocated memory, and replaces the single physical
	 * block by one or more physical blocks with refcount 1 with the
	 * same contents as the original. In other words, a fragmentable
	 * version of map_copy_ph_block().
	 */

	/* Remember where and how much. */
	offset = ph->offset;
	physaddr = ph->ph->phys;

	/* Now unlink the original physical block so we can replace
	 * it with new ones.
	 */

	SLABSANE(ph);
	SLABSANE(ph->ph);
	assert(ph->ph->refcount > 1);
	pb_unreferenced(region, ph, 1);
	assert(ph->ph->refcount >= 1);
	SLABFREE(ph);

	SANITYCHECK(SCL_DETAIL);

	/* Put new free memory in. */
	allocflags = vrallocflags(region->flags);
	region_has_single_block = (offset == 0 && region->length == VM_PAGE_SIZE);
	assert(region_has_single_block || !(allocflags & PAF_CONTIG));
	assert(!(allocflags & PAF_CLEAR));

	if(map_new_physblock(vmp, region, offset, VM_PAGE_SIZE,
		MAP_NONE, allocflags, written) != OK) {
		/* XXX original range now gone. */
		printf("VM: map_clone_ph_block: map_new_physblock failed.\n");
		return NULL;
	}

	/* Copy the block to the new memory.
	 * Can only fail if map_new_physblock didn't do what we asked.
	 */
	if(copy_abs2region(physaddr, region, offset, VM_PAGE_SIZE) != OK)
		panic("copy_abs2region failed, no good reason for that");

	newpr = physr_search(region->phys, offset, AVL_EQUAL);
	assert(newpr);
	assert(newpr->offset == offset);

	if(iter) {
		physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
		assert(physr_get_iter(iter) == newpr);
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return newpr;
}
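
/* Note: since map_clone_ph_block() invalidates the passed phys_region
 * and any iterators over the region, callers either pass their iterator
 * in so it is re-seated on the replacement block (as map_handle_memory()
 * does), or pass a NULL iter and use only the returned pointer (as
 * map_pf() and get_clean_phys_region() do).
 */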
/*===========================================================================*
 *				map_pf					     *
 *===========================================================================*/
int map_pf(vmp, region, offset, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset;
int write;
{
	vir_bytes virpage;
	struct phys_region *ph;
	int r = OK;

	assert(offset >= 0);
	assert(offset < region->length);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));

	virpage = offset - offset % VM_PAGE_SIZE;

	SANITYCHECK(SCL_FUNCTIONS);

	if((ph = physr_search(region->phys, offset, AVL_LESS_EQUAL)) &&
	   (ph->offset <= offset && offset < ph->offset + VM_PAGE_SIZE)) {
		/* Pagefault in existing block. Do copy-on-write. */
		assert(write);
		assert(region->flags & VR_WRITABLE);
		assert(ph->ph->refcount > 0);

		if(WRITABLE(region, ph->ph)) {
			r = map_ph_writept(vmp, region, ph);
			if(r != OK)
				printf("map_ph_writept failed\n");
		} else {
			if(ph->ph->refcount > 0
				&& ph->ph->share_flag != PBSH_COW) {
				printf("VM: write RO mapped pages.\n");
				return EFAULT;
			} else {
				if(!map_clone_ph_block(vmp, region, ph, NULL))
					r = ENOMEM;
			}
		}
	} else {
		/* Pagefault in non-existing block. Map in new block. */
		if(map_new_physblock(vmp, region, virpage,
			VM_PAGE_SIZE, MAP_NONE, PAF_CLEAR, 0) != OK) {
			printf("map_new_physblock failed\n");
			r = ENOMEM;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(r != OK) {
		printf("VM: map_pf: failed (%d)\n", r);
		return r;
	}

#if SANITYCHECKS
	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+virpage,
		VM_PAGE_SIZE, write)) {
		panic("map_pf: pt_checkrange failed: %d", r);
	}
#endif

	return r;
}
/*===========================================================================*
 *				map_pin_memory				     *
 *===========================================================================*/
int map_pin_memory(struct vmproc *vmp)
{
	struct vir_region *vr;
	int r;
	region_iter iter;
	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	/* Scan all memory regions. */
	while((vr = region_get_iter(&iter))) {
		/* Make sure region is mapped to physical memory and writable.*/
		r = map_handle_memory(vmp, vr, 0, vr->length, 1);
		if(r != OK) {
			panic("map_pin_memory: map_handle_memory failed: %d", r);
		}
		region_incr_iter(&iter);
	}
	return OK;
}
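
/* map_handle_memory() below ensures that every page of the region in
 * [offset, offset+length) is backed by physical memory: holes are
 * filled with newly allocated blocks (cleared, unless the region is
 * still VR_UNINITIALIZED), and if write access is requested, any block
 * that is not writable per WRITABLE() is cloned first.
 */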
/*===========================================================================*
 *				map_handle_memory			     *
 *===========================================================================*/
int map_handle_memory(vmp, region, offset, length, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset, length;
int write;
{
	struct phys_region *physr, *nextphysr;
	int changes = 0;
	physr_iter iter;
	u32_t allocflags = 0;

	if(!(region->flags & VR_UNINITIALIZED)) {
		allocflags = PAF_CLEAR;
	}

#define FREE_RANGE_HERE(er1, er2) {					\
	struct phys_region *r1 = (er1), *r2 = (er2);			\
	vir_bytes start = offset, end = offset + length;		\
	if(r1) {							\
		start = MAX(start, r1->offset + VM_PAGE_SIZE); }	\
	if(r2) {							\
		end = MIN(end, r2->offset); }				\
	if(start < end) {						\
		SANITYCHECK(SCL_DETAIL);				\
		if(map_new_physblock(vmp, region, start,		\
			end-start, MAP_NONE, allocflags, 0) != OK) {	\
			SANITYCHECK(SCL_DETAIL);			\
			return ENOMEM;					\
		}							\
		changes++;						\
	} }

	SANITYCHECK(SCL_FUNCTIONS);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));
	assert(!(offset % VM_PAGE_SIZE));
	assert(!(length % VM_PAGE_SIZE));
	assert(!write || (region->flags & VR_WRITABLE));

	physr_start_iter(region->phys, &iter, offset, AVL_LESS_EQUAL);
	physr = physr_get_iter(&iter);

	if(!physr) {
		physr_start_iter(region->phys, &iter, offset, AVL_GREATER_EQUAL);
		physr = physr_get_iter(&iter);
	}

	FREE_RANGE_HERE(NULL, physr);

	if(physr) {
		physr = reset_physr_iter(region, &iter, physr->offset);
		if(physr->offset + VM_PAGE_SIZE <= offset) {
			physr_incr_iter(&iter);
			physr = physr_get_iter(&iter);

			FREE_RANGE_HERE(NULL, physr);
			if(physr) {
				physr = reset_physr_iter(region, &iter,
					physr->offset);
			}
		}
	}

	while(physr) {
		int r;

		SANITYCHECK(SCL_DETAIL);

		if(write) {
			assert(physr->ph->refcount > 0);
			if(!WRITABLE(region, physr->ph)) {
				if(!(physr = map_clone_ph_block(vmp, region,
					physr, &iter))) {
					printf("VM: map_handle_memory: no copy\n");
					return ENOMEM;
				}
				changes++;
			} else {
				SANITYCHECK(SCL_DETAIL);
				if((r=map_ph_writept(vmp, region, physr)) != OK) {
					printf("VM: map_ph_writept failed\n");
					return r;
				}
				changes++;
				SANITYCHECK(SCL_DETAIL);
			}
		}

		SANITYCHECK(SCL_DETAIL);
		physr_incr_iter(&iter);
		nextphysr = physr_get_iter(&iter);
		FREE_RANGE_HERE(physr, nextphysr);
		SANITYCHECK(SCL_DETAIL);
		if(nextphysr) {
			if(nextphysr->offset >= offset + length)
				break;
			nextphysr = reset_physr_iter(region, &iter,
				nextphysr->offset);
		}
		physr = nextphysr;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(changes < 1) {
#if VERBOSE
		printf("region start at 0x%lx offset 0x%lx len 0x%lx write %d\n",
			region->vaddr, offset, length, write);
		printf("no changes in map_handle_memory\n");
#endif
		return EFAULT;
	}

#if SANITYCHECKS
	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset, length, write)) {
		printf("handle mem 0x%lx-0x%lx failed\n",
			region->vaddr+offset,region->vaddr+offset+length);
		map_printregion(vmp, region);
		panic("checkrange failed");
	}
#endif

	return OK;
}
#if SANITYCHECKS
static int count_phys_regions(struct vir_region *vr)
{
	int n = 0;
	struct phys_region *ph;
	physr_iter iter;
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		n++;
		physr_incr_iter(&iter);
	}
	return n;
}
#endif
/*===========================================================================*
 *				map_copy_region				     *
 *===========================================================================*/
static struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region *vr)
{
	/* map_copy_region creates a complete copy of the vir_region
	 * data structure, linking in the same phys_blocks directly,
	 * but all in limbo, i.e., the caller has to link the vir_region
	 * to a process. Therefore it doesn't increase the refcount in
	 * the phys_block; the caller has to do this once it's linked.
	 * The reason for this is to keep the sanity checks working
	 * within this function.
	 */
	struct vir_region *newvr;
	struct phys_region *ph;
	physr_iter iter;
#if SANITYCHECKS
	int cr;
	cr = count_phys_regions(vr);
#endif

	if(!(newvr = region_new(vr->parent, vr->vaddr, vr->length, vr->flags)))
		return NULL;

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		struct phys_region *newph = pb_reference(ph->ph, ph->offset, newvr);

		if(!newph) { map_free(newvr); return NULL; }

#if SANITYCHECKS
		USE(newph, newph->written = 0;);
		assert(count_phys_regions(vr) == cr);
#endif
		physr_incr_iter(&iter);
	}

#if SANITYCHECKS
	assert(count_phys_regions(vr) == count_phys_regions(newvr));
#endif

	return newvr;
}
/*===========================================================================*
 *				copy_abs2region				     *
 *===========================================================================*/
int copy_abs2region(phys_bytes abs, struct vir_region *destregion,
	phys_bytes offset, phys_bytes len)
{
	assert(destregion);
	assert(destregion->phys);
	while(len > 0) {
		phys_bytes sublen, suboffset;
		struct phys_region *ph;
		assert(destregion);
		assert(destregion->phys);
		if(!(ph = physr_search(destregion->phys, offset, AVL_LESS_EQUAL))) {
			printf("VM: copy_abs2region: no phys region found (1).\n");
			return EFAULT;
		}
		assert(ph->offset <= offset);
		if(ph->offset+VM_PAGE_SIZE <= offset) {
			printf("VM: copy_abs2region: no phys region found (2).\n");
			return EFAULT;
		}
		suboffset = offset - ph->offset;
		assert(suboffset < VM_PAGE_SIZE);
		sublen = len;
		if(sublen > VM_PAGE_SIZE - suboffset)
			sublen = VM_PAGE_SIZE - suboffset;
		assert(suboffset + sublen <= VM_PAGE_SIZE);
		if(ph->ph->refcount != 1) {
			printf("VM: copy_abs2region: refcount not 1.\n");
			return EFAULT;
		}

		if(sys_abscopy(abs, ph->ph->phys + suboffset, sublen) != OK) {
			printf("VM: copy_abs2region: abscopy failed.\n");
			return EFAULT;
		}
		abs += sublen;
		offset += sublen;
		len -= sublen;
	}

	return OK;
}
/*=========================================================================*
 *				map_writept				   *
 *=========================================================================*/
int map_writept(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *ph;
	int r;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		physr_iter ph_iter;
		physr_start_iter_least(vr->phys, &ph_iter);

		while((ph = physr_get_iter(&ph_iter))) {
			physr_incr_iter(&ph_iter);

			/* If this phys block is shared as SMAP, then do
			 * not update the page table. */
			if(ph->ph->refcount > 1
				&& ph->ph->share_flag == PBSH_SMAP) {
				continue;
			}

			if((r=map_ph_writept(vmp, vr, ph)) != OK) {
				printf("VM: map_writept: failed\n");
				return r;
			}
		}
		region_incr_iter(&v_iter);
	}

	return OK;
}
/*========================================================================*
 *				map_proc_copy				  *
 *========================================================================*/
int map_proc_copy(dst, src)
struct vmproc *dst;
struct vmproc *src;
{
/* Copy all the memory regions from the src process to the dst process. */
	region_init(&dst->vm_regions_avl);

	return map_proc_copy_from(dst, src, NULL);
}
/*========================================================================*
 *				map_proc_copy_from			  *
 *========================================================================*/
int map_proc_copy_from(dst, src, start_src_vr)
struct vmproc *dst;
struct vmproc *src;
struct vir_region *start_src_vr;
{
	struct vir_region *vr;
	region_iter v_iter;

	if(!start_src_vr)
		start_src_vr = region_search_least(&src->vm_regions_avl);

	assert(start_src_vr);
	assert(start_src_vr->parent == src);
	region_start_iter(&src->vm_regions_avl, &v_iter,
		start_src_vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter) == start_src_vr);

	/* Copy source regions after the destination's last region (if any). */

	SANITYCHECK(SCL_FUNCTIONS);

	while((vr = region_get_iter(&v_iter))) {
		physr_iter iter_orig, iter_new;
		struct vir_region *newvr;
		struct phys_region *orig_ph, *new_ph;
		if(!(newvr = map_copy_region(dst, vr))) {
			map_free_proc(dst);
			return ENOMEM;
		}
		USE(newvr, newvr->parent = dst;);
		region_insert(&dst->vm_regions_avl, newvr);
		physr_start_iter_least(vr->phys, &iter_orig);
		physr_start_iter_least(newvr->phys, &iter_new);
		while((orig_ph = physr_get_iter(&iter_orig))) {
			struct phys_block *pb;
			new_ph = physr_get_iter(&iter_new);
			/* Check two physregions both are nonnull,
			 * are different, and match physblocks.
			 */
			assert(new_ph);
			assert(orig_ph);
			assert(orig_ph != new_ph);
			pb = orig_ph->ph;
			assert(orig_ph->ph == new_ph->ph);

			/* If the phys block has been shared as SMAP,
			 * do the regular copy. */
			if(pb->refcount > 2 && pb->share_flag == PBSH_SMAP) {
				map_clone_ph_block(dst, newvr,new_ph,
					&iter_new);
			} else {
				USE(pb, pb->share_flag = PBSH_COW;);
			}

			/* Get next new physregion */
			physr_incr_iter(&iter_orig);
			physr_incr_iter(&iter_new);
		}
		assert(!physr_get_iter(&iter_new));
		region_incr_iter(&v_iter);
	}

	map_writept(src);
	map_writept(dst);

	SANITYCHECK(SCL_FUNCTIONS);
	return OK;
}
int map_region_extend_upto_v(struct vmproc *vmp, vir_bytes v)
{
	vir_bytes offset = v, end;
	struct vir_region *vr, *nextvr;
	int r = OK;

	if(!(vr = region_search(&vmp->vm_regions_avl, offset, AVL_LESS))) {
		printf("VM: nothing to extend\n");
		return ENOMEM;
	}

	if(!(vr->flags & VR_ANON)) {
		printf("VM: memory range to extend not anonymous\n");
		return ENOMEM;
	}

	assert(vr->vaddr <= offset);
	if((nextvr = getnextvr(vr))) {
		assert(offset <= nextvr->vaddr);
	}

	end = vr->vaddr + vr->length;

	offset = roundup(offset, VM_PAGE_SIZE);

	if(end < offset)
		r = map_region_extend(vmp, vr, offset - end);

	return r;
}
/*========================================================================*
 *				map_region_extend			  *
 *========================================================================*/
int map_region_extend(struct vmproc *vmp, struct vir_region *vr,
	vir_bytes delta)
{
	vir_bytes end;
	struct vir_region *nextvr;

	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));
	if(vr->flags & VR_CONTIG) {
		printf("VM: can't grow contig region\n");
		return EFAULT;
	}

	if(!delta) return OK;
	end = vr->vaddr + vr->length;
	assert(end >= vr->vaddr);

	if(end + delta <= end) {
		printf("VM: strange delta 0x%lx\n", delta);
		return ENOMEM;
	}

	nextvr = getnextvr(vr);

	if(!nextvr || end + delta <= nextvr->vaddr) {
		USE(vr, vr->length += delta;);
		return OK;
	}

	return ENOMEM;
}
/*========================================================================*
 *				map_region_shrink			  *
 *========================================================================*/
int map_region_shrink(struct vir_region *vr, vir_bytes delta)
{
	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));

#if 0
	printf("VM: ignoring region shrink\n");
#endif

	return OK;
}
struct vir_region *map_region_lookup_tag(vmp, tag)
struct vmproc *vmp;
u32_t tag;
{
	struct vir_region *vr;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		if(vr->tag == tag)
			return vr;
		region_incr_iter(&v_iter);
	}

	return NULL;
}

void map_region_set_tag(struct vir_region *vr, u32_t tag)
{
	USE(vr, vr->tag = tag;);
}

u32_t map_region_get_tag(struct vir_region *vr)
{
	return vr->tag;
}
/*========================================================================*
 *				map_unmap_region			  *
 *========================================================================*/
int map_unmap_region(struct vmproc *vmp, struct vir_region *r,
	vir_bytes offset, vir_bytes len)
{
/* Shrink the region by 'len' bytes, from the start. Unreference
 * memory it used to reference if any.
 */
	vir_bytes regionstart;

	SANITYCHECK(SCL_FUNCTIONS);

	if(offset+len > r->length || (len % VM_PAGE_SIZE)) {
		printf("VM: bogus length 0x%lx\n", len);
		return EINVAL;
	}

	if(!(r->flags & (VR_ANON|VR_DIRECT))) {
		printf("VM: only unmap anonymous or direct memory\n");
		return EINVAL;
	}

	regionstart = r->vaddr + offset;

	/* unreference its memory */
	map_subfree(r, offset, len);

	/* if unmap was at start/end of this region, it actually shrinks */
	if(offset == 0) {
		struct phys_region *pr;
		physr_iter iter;

		region_remove(&vmp->vm_regions_avl, r->vaddr);

		USE(r,
			r->vaddr += len;
			r->length -= len;);

		region_insert(&vmp->vm_regions_avl, r);

		/* vaddr has increased; to make all the phys_regions
		 * point to the same addresses, make them shrink by the
		 * same amount.
		 */
		physr_init_iter(&iter);
		physr_start_iter(r->phys, &iter, offset, AVL_GREATER_EQUAL);

		while((pr = physr_get_iter(&iter))) {
			assert(pr->offset >= offset);
			USE(pr, pr->offset -= len;);
			physr_incr_iter(&iter);
		}
	} else if(offset + len == r->length) {
		assert(len <= r->length);
		r->length -= len;
	}

	if(r->length == 0) {
		/* Whole region disappears. Unlink and free it. */
		region_remove(&vmp->vm_regions_avl, r->vaddr);
		map_free(r);
	}

	SANITYCHECK(SCL_DETAIL);

	if(pt_writemap(vmp, &vmp->vm_pt, regionstart,
	  MAP_NONE, len, 0, WMF_OVERWRITE) != OK) {
		printf("VM: map_unmap_region: pt_writemap failed\n");
		return ENOMEM;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
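
/* map_remap() below maps an existing shared region into process dvmp.
 * A nonzero da requests that exact address; da == 0 lets VM pick any
 * slot below VM_DATATOP. On success the chosen start address is
 * returned through *r.
 */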
/*========================================================================*
 *				map_remap				  *
 *========================================================================*/
int map_remap(struct vmproc *dvmp, vir_bytes da, size_t size,
		struct vir_region *region, vir_bytes *r, int readonly)
{
	struct vir_region *vr;
	struct phys_region *ph;
	vir_bytes startv, dst_addr;
	physr_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	assert(region->flags & VR_SHARED);

	/* da is handled differently */
	if (!da)
		dst_addr = 0;
	else
		dst_addr = da;

	/* round up to page size */
	assert(!(size % VM_PAGE_SIZE));
	startv = region_find_slot(dvmp, dst_addr, VM_DATATOP, size);
	if (startv == SLOT_FAIL) {
		return ENOMEM;
	}
	/* when the user specifies the address, we cannot change it */
	if (da && (startv != dst_addr))
		return EINVAL;

	vr = map_copy_region(dvmp, region);
	if(!vr)
		return ENOMEM;

	USE(vr,
		vr->vaddr = startv;
		vr->length = size;
		vr->flags = region->flags;
		vr->tag = VRT_NONE;
		vr->parent = dvmp;
		if(readonly) {
			vr->flags &= ~VR_WRITABLE;
		}
	);
	assert(vr->flags & VR_SHARED);

	region_insert(&dvmp->vm_regions_avl, vr);

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		if(map_ph_writept(dvmp, vr, ph) != OK) {
			panic("map_remap: map_ph_writept failed");
		}
		physr_incr_iter(&iter);
	}

	*r = startv;

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*========================================================================*
 *				map_get_phys				  *
 *========================================================================*/
int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
{
	struct vir_region *vr;
	struct phys_region *ph;
	physr_iter iter;

	if (!(vr = map_lookup(vmp, addr, NULL)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!(vr->flags & VR_SHARED))
		return EINVAL;

	physr_start_iter_least(vr->phys, &iter);
	ph = physr_get_iter(&iter);

	assert(ph);
	assert(ph->ph);
	if (r)
		*r = ph->ph->phys;

	return OK;
}
/*========================================================================*
 *				map_get_ref				  *
 *========================================================================*/
int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
{
	struct vir_region *vr;
	struct phys_region *ph;
	physr_iter iter;

	if (!(vr = map_lookup(vmp, addr, NULL)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!(vr->flags & VR_SHARED))
		return EINVAL;

	physr_start_iter_least(vr->phys, &iter);
	ph = physr_get_iter(&iter);

	assert(ph);
	assert(ph->ph);
	if (cnt)
		*cnt = ph->ph->refcount;

	return OK;
}
/*========================================================================*
 *				get_stats_info				  *
 *========================================================================*/
void get_stats_info(struct vm_stats_info *vsi)
{
	yielded_t *yb;

	vsi->vsi_cached = 0L;

	for(yb = lru_youngest; yb; yb = yb->older)
		vsi->vsi_cached++;
}
/*========================================================================*
 *				get_usage_info				  *
 *========================================================================*/
void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
{
	struct vir_region *vr;
	physr_iter iter;
	struct phys_region *ph;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	memset(vui, 0, sizeof(*vui));

	while((vr = region_get_iter(&v_iter))) {
		physr_start_iter_least(vr->phys, &iter);
		while((ph = physr_get_iter(&iter))) {
			/* All present pages are counted towards the total. */
			vui->vui_total += VM_PAGE_SIZE;

			if (ph->ph->refcount > 1) {
				/* Any page with a refcount > 1 is common. */
				vui->vui_common += VM_PAGE_SIZE;

				/* Any common, non-COW page is shared. */
				if (vr->flags & VR_SHARED ||
					ph->ph->share_flag == PBSH_SMAP)
					vui->vui_shared += VM_PAGE_SIZE;
			}
			physr_incr_iter(&iter);
		}
		region_incr_iter(&v_iter);
	}
}
/*===========================================================================*
 *				get_region_info				     *
 *===========================================================================*/
int get_region_info(struct vmproc *vmp, struct vm_region_info *vri,
	int max, vir_bytes *nextp)
{
	struct vir_region *vr;
	vir_bytes next;
	int count;
	region_iter v_iter;

	next = *nextp;

	if (!max) return 0;

	region_start_iter(&vmp->vm_regions_avl, &v_iter, next, AVL_GREATER_EQUAL);
	if(!(vr = region_get_iter(&v_iter))) return 0;

	for(count = 0; (vr = region_get_iter(&v_iter)) && count < max; count++, vri++) {
		struct phys_region *ph1, *ph2;

		/* Report part of the region that's actually in use. */

		/* Get first and last phys_regions, if any */
		ph1 = physr_search_least(vr->phys);
		ph2 = physr_search_greatest(vr->phys);
		if(!ph1 || !ph2) { assert(!ph1 && !ph2); continue; }

		/* Report start+length of region starting from lowest use. */
		vri->vri_addr = vr->vaddr + ph1->offset;
		vri->vri_prot = 0;
		vri->vri_length = ph2->offset + VM_PAGE_SIZE - ph1->offset;

		/* "AND" the provided protection with per-page protection. */
		if (!(vr->flags & VR_WRITABLE))
			vri->vri_prot &= ~PROT_WRITE;

		vri->vri_flags = (vr->flags & VR_SHARED) ? MAP_IPC_SHARED : 0;

		next = vr->vaddr + vr->length;
		region_incr_iter(&v_iter);
	}

	*nextp = next;
	return count;
}
/*========================================================================*
 *				printregionstats			  *
 *========================================================================*/
void printregionstats(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *pr;
	physr_iter iter;
	vir_bytes used = 0, weighted = 0;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		region_incr_iter(&v_iter);
		if(vr->flags & VR_DIRECT)
			continue;
		physr_start_iter_least(vr->phys, &iter);
		while((pr = physr_get_iter(&iter))) {
			physr_incr_iter(&iter);
			used += VM_PAGE_SIZE;
			weighted += VM_PAGE_SIZE / pr->ph->refcount;
		}
	}

	printf("%6lukB %6lukB\n", used/1024, weighted/1024);

	return;
}
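
/* do_map_memory() below links the phys blocks backing vrs into vrd,
 * page by page. flag selects the sharing mode: positive maps the pages
 * writable and shares them as SMAP, zero shares them as SMAP read-only,
 * and negative shares them copy-on-write (also read-only).
 */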
/*===========================================================================*
 *				do_map_memory				     *
 *===========================================================================*/
static int do_map_memory(struct vmproc *vms, struct vmproc *vmd,
	struct vir_region *vrs, struct vir_region *vrd,
	vir_bytes offset_s, vir_bytes offset_d,
	vir_bytes length, int flag)
{
	struct phys_region *prs;
	struct phys_region *newphysr;
	struct phys_block *pb;
	physr_iter iter;
	u32_t pt_flag = PTF_PRESENT | PTF_USER;
	vir_bytes end;

	/* Search for the first phys region in the source process. */
	physr_start_iter(vrs->phys, &iter, offset_s, AVL_EQUAL);
	prs = physr_get_iter(&iter);
	if(!prs)
		panic("do_map_memory: no aligned phys region: %d", 0);

	/* flag: 0 -> read-only
	 *       1 -> writable
	 *      -1 -> share as COW, so read-only
	 */
	if(flag > 0)
		pt_flag |= PTF_WRITE;
	else
		pt_flag |= PTF_READ;

	/* Map phys blocks in the source process to the destination process. */
	end = offset_d + length;
	while((prs = physr_get_iter(&iter)) && offset_d < end) {
		/* If a SMAP share was requested but the phys block has already
		 * been shared as COW, copy the block for the source phys region
		 * first.
		 */
		pb = prs->ph;
		if(flag >= 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_COW) {
			if(!(prs = map_clone_ph_block(vms, vrs, prs, &iter)))
				return ENOMEM;
			pb = prs->ph;
		}

		/* Allocate a new phys region. */
		if(!(newphysr = pb_reference(pb, offset_d, vrd)))
			return ENOMEM;

		/* If a COW share was requested but the phys block has already
		 * been shared as SMAP, give up on COW and copy the block for
		 * the destination phys region now.
		 */
		if(flag < 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_SMAP) {
			if(!(newphysr = map_clone_ph_block(vmd, vrd,
				newphysr, NULL))) {
				return ENOMEM;
			}
		}
		else {
			/* See if this is a COW share or SMAP share. */
			if(flag < 0) {			/* COW share */
				pb->share_flag = PBSH_COW;
				/* Update the page table for the src process. */
				pt_writemap(vms, &vms->vm_pt, offset_s + vrs->vaddr,
					pb->phys, VM_PAGE_SIZE,
					pt_flag, WMF_OVERWRITE);
			}
			else {				/* SMAP share */
				pb->share_flag = PBSH_SMAP;
			}
			/* Update the page table for the destination process. */
			pt_writemap(vmd, &vmd->vm_pt, offset_d + vrd->vaddr,
				pb->phys, VM_PAGE_SIZE, pt_flag, WMF_OVERWRITE);
		}

		physr_incr_iter(&iter);
		offset_d += VM_PAGE_SIZE;
		offset_s += VM_PAGE_SIZE;
	}
	return OK;
}
/*===========================================================================*
 *				unmap_memory				     *
 *===========================================================================*/
int unmap_memory(endpoint_t sour, endpoint_t dest,
	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
{
	struct vmproc *vmd;
	struct vir_region *vrd;
	struct phys_region *pr;
	struct phys_block *pb;
	physr_iter iter;
	vir_bytes off, end;
	int p;

	/* Use information on the destination process to unmap. */
	if(vm_isokendpt(dest, &p) != OK)
		panic("unmap_memory: bad endpoint: %d", dest);
	vmd = &vmproc[p];

	vrd = map_lookup(vmd, virt_d, NULL);
	assert(vrd);

	/* Search for the first phys region in the destination process. */
	off = virt_d - vrd->vaddr;
	physr_start_iter(vrd->phys, &iter, off, AVL_EQUAL);
	pr = physr_get_iter(&iter);
	if(!pr)
		panic("unmap_memory: no aligned phys region: %d", 0);

	/* Copy the phys block now rather than doing COW. */
	end = off + length;
	while((pr = physr_get_iter(&iter)) && off < end) {
		pb = pr->ph;
		assert(pb->refcount > 1);
		assert(pb->share_flag == PBSH_SMAP);

		if(!(pr = map_clone_ph_block(vmd, vrd, pr, &iter)))
			return ENOMEM;

		physr_incr_iter(&iter);
		off += VM_PAGE_SIZE;
	}

	return OK;
}
/*===========================================================================*
 *				rm_phys_regions				     *
 *===========================================================================*/
static void rm_phys_regions(struct vir_region *region,
	vir_bytes begin, vir_bytes length)
{
/* Remove all phys regions between @begin and @begin+length.
 *
 * Don't update the page table; it will be updated in map_memory()
 * later.
 */
	struct phys_region *pr;
	physr_iter iter;

	physr_start_iter(region->phys, &iter, begin, AVL_GREATER_EQUAL);
	while((pr = physr_get_iter(&iter)) && pr->offset < begin + length) {
		pb_unreferenced(region, pr, 1);
		physr_start_iter(region->phys, &iter, begin,
			AVL_GREATER_EQUAL);
		SLABFREE(pr);
	}
}
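/* The iterator restart in rm_phys_regions() looks redundant but is
 * deliberate: pb_unreferenced() removes the phys region from the AVL
 * tree, which presumably invalidates the live iterator, so the loop
 * starts a fresh lookup from @begin after every removal.
 */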
/*===========================================================================*
 *				map_memory				     *
 *===========================================================================*/
int map_memory(endpoint_t sour, endpoint_t dest,
	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
{
/* This is the entry point: it is called by handle_memory() when VM
 * receives a map-memory request.
 */
	struct vmproc *vms, *vmd;
	struct vir_region *vrs, *vrd;
	vir_bytes offset_s, offset_d;
	int p;
	int r;

	if(vm_isokendpt(sour, &p) != OK)
		panic("map_memory: bad endpoint: %d", sour);
	vms = &vmproc[p];
	if(vm_isokendpt(dest, &p) != OK)
		panic("map_memory: bad endpoint: %d", dest);
	vmd = &vmproc[p];

	vrs = map_lookup(vms, virt_s, NULL);
	assert(vrs);
	vrd = map_lookup(vmd, virt_d, NULL);
	assert(vrd);

	/* Linear address -> offset from start of vir region. */
	offset_s = virt_s - vrs->vaddr;
	offset_d = virt_d - vrd->vaddr;

	/* Make sure that the range in the source process has been mapped
	 * to physical memory.
	 */
	map_handle_memory(vms, vrs, offset_s, length, 0);

	/* Prepare work. */
	rm_phys_regions(vrd, offset_d, length);

	/* Map memory. */
	r = do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, flag);

	return r;
}
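/* Illustrative call (hypothetical endpoints and addresses, sketched from
 * the signature above): sharing four pages copy-on-write could look like
 *
 *	r = map_memory(src_e, dst_e, src_vaddr, dst_vaddr,
 *		4 * VM_PAGE_SIZE, -1);
 *
 * where the negative flag selects a COW share and a nonnegative flag a
 * SMAP share, as handled in do_map_memory().
 */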
/*===========================================================================*
 *				get_clean_phys_region			     *
 *===========================================================================*/
static struct phys_region *
get_clean_phys_region(struct vmproc *vmp, vir_bytes vaddr, struct vir_region **ret_region)
{
	struct vir_region *region;
	vir_bytes mapaddr;
	struct phys_region *ph;

	mapaddr = vaddr;

	if(!(region = map_lookup(vmp, mapaddr, &ph)) || !ph) {
		printf("VM: get_clean_phys_region: 0x%lx not found\n", vaddr);
		return NULL;
	}

	assert(mapaddr >= region->vaddr);
	assert(mapaddr < region->vaddr + region->length);

	/* If it's mapped more than once, make a copy. */
	assert(ph->ph->refcount > 0);
	if(ph->ph->refcount > 1) {
		if(!(ph = map_clone_ph_block(vmp, region,
			ph, NULL))) {
			printf("VM: get_clean_phys_region: ph copy failed\n");
			return NULL;
		}
	}

	assert(ph->ph->refcount == 1);

	*ret_region = region;

	return ph;
}
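/* On success, get_clean_phys_region() guarantees a phys block with
 * refcount exactly 1, so callers such as getblock() below may free and
 * replace the underlying physical frame without disturbing any other
 * process's mappings.
 */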
static int getblock(struct vmproc *vmp, u64_t id, vir_bytes vaddr, int pages)
{
	yielded_t *yb;
	struct phys_region *ph;
	struct vir_region *region;
	yielded_avl *avl;
	block_id_t blockid;
	phys_bytes phaddr;
	int p;

	/* Try to get the yielded block */
	blockid.owner = vmp->vm_endpoint;
	blockid.id = id;
	avl = get_yielded_avl(blockid);
	if(!(yb = yielded_search(avl, blockid, AVL_EQUAL))) {
		return ESRCH;
	}

	if(yb->pages != pages) {
		printf("VM: getblock: length mismatch (%d != %d)\n",
			pages, yb->pages);
		return EFAULT;
	}

	phaddr = yb->physaddr;

	for(p = 0; p < pages; p++) {
		/* Get the intended phys region, make sure refcount is 1. */
		if(!(ph = get_clean_phys_region(vmp, vaddr, &region))) {
			printf("VM: getblock: not found for %d\n", vmp->vm_endpoint);
			return EINVAL;
		}

		assert(ph->ph->refcount == 1);

		/* Free the block that is currently there. */
		free_mem(ABS2CLICK(ph->ph->phys), 1);

		/* Set the phys block to new addr and update pagetable. */
		USE(ph->ph, ph->ph->phys = phaddr;);
		if(map_ph_writept(vmp, region, ph) != OK) {
			/* Presumably it was mapped, so there is no reason
			 * updating should fail.
			 */
			panic("do_get_block: couldn't write pt");
		}

		vaddr += VM_PAGE_SIZE;
		phaddr += VM_PAGE_SIZE;
	}

	/* Forget about the yielded block and free the struct. */
	freeyieldednode(yb, 0);

	return OK;
}
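/* Note that getblock() copies no page contents: it frees the frames
 * currently backing the virtual range and repoints the (now private)
 * phys blocks at the yielded frames. The final freeyieldednode(yb, 0)
 * discards only the bookkeeping; the zero argument presumably keeps the
 * frames allocated, in contrast to do_forgetblock() below, which
 * passes 1 to release them.
 */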
static int yieldblock(struct vmproc *vmp, u64_t id,
	vir_bytes vaddr, yielded_t **retyb, int pages)
{
	yielded_t *newyb;
	vir_bytes mem_clicks, v, p, new_phaddr;
	struct vir_region *region;
	struct phys_region *ph = NULL, *prev_ph = NULL, *first_ph = NULL;
	yielded_avl *avl;
	block_id_t blockid;

	/* Makes no sense if yielded block ID already exists, and
	 * is likely a serious bug in the caller.
	 */
	blockid.id = id;
	blockid.owner = vmp->vm_endpoint;
	avl = get_yielded_avl(blockid);
	if(yielded_search(avl, blockid, AVL_EQUAL)) {
		printf("!");
		return EINVAL;
	}

	if((vaddr % VM_PAGE_SIZE) || pages < 1) return EFAULT;

	v = vaddr;
	for(p = 0; p < pages; p++) {
		if(!(region = map_lookup(vmp, v, &ph)) || !ph) {
			printf("VM: do_yield_block: not found for %d\n",
				vmp->vm_endpoint);
			return EINVAL;
		}
		if(!(region->flags & VR_ANON)) {
			printf("VM: yieldblock: non-anon 0x%lx\n", v);
			return EFAULT;
		}
		if(ph->ph->refcount != 1) {
			printf("VM: do_yield_block: mapped not once for %d\n",
				vmp->vm_endpoint);
			return EFAULT;
		}
		if(prev_ph) {
			if(ph->ph->phys != prev_ph->ph->phys + VM_PAGE_SIZE) {
				printf("VM: physically discontiguous yield\n");
				return EINVAL;
			}
		}
		prev_ph = ph;
		if(!first_ph) first_ph = ph;
		v += VM_PAGE_SIZE;
	}

	/* Make a new block to record the yielding in. */
	if(!SLABALLOC(newyb)) {
		return ENOMEM;
	}

	assert(!(ph->ph->phys % VM_PAGE_SIZE));

	if((mem_clicks = alloc_mem(pages, PAF_CLEAR)) == NO_MEM) {
		SLABFREE(newyb);
		return ENOMEM;
	}

	/* Update yielded block info. */
	USE(newyb,
		newyb->id = blockid;
		newyb->physaddr = first_ph->ph->phys;
		newyb->pages = pages;
		newyb->younger = NULL;);

	new_phaddr = CLICK2ABS(mem_clicks);

	/* Set new phys block to new addr and update pagetable. */
	v = vaddr;
	for(p = 0; p < pages; p++) {
		region = map_lookup(vmp, v, &ph);
		assert(region && ph);
		assert(ph->ph->refcount == 1);
		USE(ph->ph,
			ph->ph->phys = new_phaddr;);
		if(map_ph_writept(vmp, region, ph) != OK) {
			/* Presumably it was mapped, so there is no reason
			 * updating should fail.
			 */
			panic("yield_block: couldn't write pt");
		}
		v += VM_PAGE_SIZE;
		new_phaddr += VM_PAGE_SIZE;
	}

	/* Remember yielded block. */
	yielded_insert(avl, newyb);
	vmp->vm_yielded++;

	/* Add to LRU list too. It's the youngest block. */
	LRUCHECK;

	if(lru_youngest) {
		USE(lru_youngest,
			lru_youngest->younger = newyb;);
	} else {
		lru_oldest = newyb;
	}

	USE(newyb,
		newyb->older = lru_youngest;);

	lru_youngest = newyb;

	LRUCHECK;

	if(retyb)
		*retyb = newyb;

	return OK;
}
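/* yieldblock() thus trades pages rather than merely recording them: the
 * caller's virtual range is rebacked by freshly allocated, zero-filled
 * memory (alloc_mem() with PAF_CLEAR), while the original, physically
 * contiguous frames are parked in the yielded_t, retrievable later via
 * getblock() and, as the youngest entry on the LRU list, presumably the
 * last candidate for reclamation.
 */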
/*===========================================================================*
 *				do_forgetblocks				     *
 *===========================================================================*/
int do_forgetblocks(message *m)
{
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_forgetblocks: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	free_yielded_proc(vmp);

	return OK;
}
/*===========================================================================*
 *				do_forgetblock				     *
 *===========================================================================*/
int do_forgetblock(message *m)
{
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;
	yielded_t *yb;
	u64_t id;
	block_id_t blockid;
	yielded_avl *avl;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_forgetblock: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	id = make64(m->VMFB_IDLO, m->VMFB_IDHI);

	blockid.id = id;
	blockid.owner = vmp->vm_endpoint;
	avl = get_yielded_avl(blockid);
	if((yb = yielded_search(avl, blockid, AVL_EQUAL))) {
		freeyieldednode(yb, 1);
	}

	return OK;
}
/*===========================================================================*
 *				do_yieldblockgetblock			     *
 *===========================================================================*/
int do_yieldblockgetblock(message *m)
{
	u64_t yieldid, getid;
	int n;
	endpoint_t caller = m->m_source;
	struct vmproc *vmp;
	yielded_t *yb = NULL;
	int r = ESRCH;
	int pages;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yieldblockgetblock: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	pages = m->VMYBGB_LEN / VM_PAGE_SIZE;

	if((m->VMYBGB_LEN % VM_PAGE_SIZE) || pages < 1) {
		static int printed;
		if(!printed) {
			printed = 1;
			printf("vm: non-page-aligned or short block length\n");
		}
		return EFAULT;
	}

	yieldid = make64(m->VMYBGB_YIELDIDLO, m->VMYBGB_YIELDIDHI);
	getid = make64(m->VMYBGB_GETIDLO, m->VMYBGB_GETIDHI);

	if(cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to yield. */
		yieldblock(vmp, yieldid, (vir_bytes) m->VMYBGB_VADDR, &yb,
			pages);
	}

	if(cmp64(getid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to get. */
		r = getblock(vmp, getid, (vir_bytes) m->VMYBGB_VADDR, pages);
	}

	return r;
}
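/* Message layout assumed by do_yieldblockgetblock(), as read above:
 * VMYBGB_LEN is the byte length (page-aligned, at least one page),
 * VMYBGB_VADDR the virtual address, and the yield/get block ids each
 * arrive as a lo/hi pair combined with make64(). Passing
 * VM_BLOCKID_NONE for either id skips that half of the operation.
 */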
void map_setparent(struct vmproc *vmp)
{
	region_iter iter;
	struct vir_region *vr;
	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		USE(vr, vr->parent = vmp;);
		region_incr_iter(&iter);
	}
}