/* servers/vm/region.c */
#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>
#include <minix/hash.h>
#include <machine/multiboot.h>

#include <sys/mman.h>

#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>
#include <sys/param.h>

#include "vm.h"
#include "proto.h"
#include "util.h"
#include "glo.h"
#include "region.h"
#include "sanitycheck.h"
#include "yieldedavl.h"
#include "memlist.h"
#include "memtype.h"

/* LRU list. */
static yielded_t *lru_youngest = NULL, *lru_oldest = NULL;

static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr);

static phys_bytes freeyieldednode(yielded_t *node, int freemem);

static struct vir_region *map_copy_region(struct vmproc *vmp, struct
	vir_region *vr);

#if SANITYCHECKS
static void lrucheck(void);
#endif

/* hash table of yielded blocks */
#define YIELD_HASHSIZE 65536
static yielded_avl vm_yielded_blocks[YIELD_HASHSIZE];

static int avl_inited = 0;

void map_region_init(void)
{
	int h;
	assert(!avl_inited);
	for(h = 0; h < YIELD_HASHSIZE; h++)
		yielded_init(&vm_yielded_blocks[h]);
	avl_inited = 1;
}
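
/* Hash a block id (owner endpoint plus 64-bit id) to one of the
 * YIELD_HASHSIZE AVL trees that together index all yielded blocks.
 */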
static yielded_avl *get_yielded_avl(block_id_t id)
{
	u32_t h;

	assert(avl_inited);

	hash_i_64(id.owner, id.id, h);
	h = h % YIELD_HASHSIZE;

	assert(h >= 0);
	assert(h < YIELD_HASHSIZE);

	return &vm_yielded_blocks[h];
}

void map_printregion(struct vir_region *vr)
{
	int i;
	struct phys_region *ph;
	printf("map_printmap: map_name: %s\n", vr->memtype->name);
	printf("\t%lx (len 0x%lx, %lukB), %p\n",
		vr->vaddr, vr->length, vr->length/1024, vr->memtype->name);
	printf("\t\tphysblocks:\n");
	for(i = 0; i < vr->length/VM_PAGE_SIZE; i++) {
		if(!(ph=vr->physblocks[i])) continue;
		printf("\t\t@ %lx (refs %d): phys 0x%lx\n",
			(vr->vaddr + ph->offset),
			ph->ph->refcount, ph->ph->phys);
	}
}
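
/* Look up the phys_region backing the page at 'offset' (page-aligned,
 * relative to the region start), or NULL if that page is not mapped.
 */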
struct phys_region *physblock_get(struct vir_region *region, vir_bytes offset)
{
	int i;
	struct phys_region *foundregion;
	assert(!(offset % VM_PAGE_SIZE));
	assert(offset >= 0 && offset < region->length);
	i = offset/VM_PAGE_SIZE;
	if((foundregion = region->physblocks[i]))
		assert(foundregion->offset == offset);
	return foundregion;
}

void physblock_set(struct vir_region *region, vir_bytes offset,
	struct phys_region *newphysr)
{
	int i;
	assert(!(offset % VM_PAGE_SIZE));
	assert(offset >= 0 && offset < region->length);
	i = offset/VM_PAGE_SIZE;
	if(newphysr) {
		assert(!region->physblocks[i]);
		assert(newphysr->offset == offset);
	} else {
		assert(region->physblocks[i]);
	}
	region->physblocks[i] = newphysr;
}

/*===========================================================================*
 *				map_printmap				     *
 *===========================================================================*/
void map_printmap(vmp)
struct vmproc *vmp;
{
	struct vir_region *vr;
	region_iter iter;

	printf("memory regions in process %d:\n", vmp->vm_endpoint);

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		map_printregion(vr);
		region_incr_iter(&iter);
	}
}

static struct vir_region *getnextvr(struct vir_region *vr)
{
	struct vir_region *nextvr;
	region_iter v_iter;
	SLABSANE(vr);
	region_start_iter(&vr->parent->vm_regions_avl, &v_iter, vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter));
	assert(region_get_iter(&v_iter) == vr);
	region_incr_iter(&v_iter);
	nextvr = region_get_iter(&v_iter);
	if(!nextvr) return NULL;
	SLABSANE(nextvr);
	assert(vr->parent == nextvr->parent);
	assert(vr->vaddr < nextvr->vaddr);
	assert(vr->vaddr + vr->length <= nextvr->vaddr);
	return nextvr;
}
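
/* A phys_region is writable only if the region allows writes and the
 * memory type agrees (e.g. a shared copy-on-write page stays read-only
 * until it has been copied).
 */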
int pr_writable(struct vir_region *vr, struct phys_region *pr)
{
	assert(vr->memtype->writable);
	return ((vr->flags & VR_WRITABLE) && vr->memtype->writable(pr));
}

#if SANITYCHECKS

/*===========================================================================*
 *				map_sanitycheck_pt			     *
 *===========================================================================*/
static int map_sanitycheck_pt(struct vmproc *vmp,
	struct vir_region *vr, struct phys_region *pr)
{
	struct phys_block *pb = pr->ph;
	int rw;
	int r;

	if(pr_writable(vr, pr))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	r = pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
		pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw, WMF_VERIFY);

	if(r != OK) {
		printf("proc %d phys_region 0x%lx sanity check failed\n",
			vmp->vm_endpoint, pr->offset);
		map_printregion(vr);
	}

	return r;
}

/*===========================================================================*
 *				map_sanitycheck				     *
 *===========================================================================*/
void map_sanitycheck(char *file, int line)
{
	struct vmproc *vmp;

	lrucheck();

	/* Macro for looping over all physical blocks of all regions of
	 * all processes.
	 */
#define ALLREGIONS(regioncode, physcode)			\
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) {	\
		vir_bytes voffset;				\
		region_iter v_iter;				\
		struct vir_region *vr;				\
		if(!(vmp->vm_flags & VMF_INUSE))		\
			continue;				\
		region_start_iter_least(&vmp->vm_regions_avl, &v_iter);	\
		while((vr = region_get_iter(&v_iter))) {	\
			struct phys_region *pr;			\
			regioncode;				\
			for(voffset = 0; voffset < vr->length;	\
				voffset += VM_PAGE_SIZE) {	\
				if(!(pr = physblock_get(vr, voffset)))	\
					continue;		\
				physcode;			\
			}					\
			region_incr_iter(&v_iter);		\
		}						\
	}

#define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
	/* Basic pointers check. */
	ALLREGIONS(MYSLABSANE(vr),MYSLABSANE(pr); MYSLABSANE(pr->ph);MYSLABSANE(pr->parent));
	ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr->parent == vr););

	/* Do counting for consistency check. */
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
	ALLREGIONS(;,MYASSERT(pr->offset == voffset););
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
		if(pr->ph->seencount == 1) {
			if(pr->parent->memtype->ev_sanitycheck)
				pr->parent->memtype->ev_sanitycheck(pr, file, line);
		}
	);

	/* Do consistency check. */
	ALLREGIONS({ struct vir_region *nextvr = getnextvr(vr);
		if(nextvr) {
			MYASSERT(vr->vaddr < nextvr->vaddr);
			MYASSERT(vr->vaddr + vr->length <= nextvr->vaddr);
		}
		}
		MYASSERT(!(vr->vaddr % VM_PAGE_SIZE));,
		if(pr->ph->refcount != pr->ph->seencount) {
			map_printmap(vmp);
			printf("ph in vr %p: 0x%lx refcount %u "
				"but seencount %u\n",
				vr, pr->offset,
				pr->ph->refcount, pr->ph->seencount);
		}
		{
			int n_others = 0;
			struct phys_region *others;
			if(pr->ph->refcount > 0) {
				MYASSERT(pr->ph->firstregion);
				if(pr->ph->refcount == 1) {
					MYASSERT(pr->ph->firstregion == pr);
				}
			} else {
				MYASSERT(!pr->ph->firstregion);
			}
			for(others = pr->ph->firstregion; others;
				others = others->next_ph_list) {
				MYSLABSANE(others);
				MYASSERT(others->ph == pr->ph);
				n_others++;
			}
			MYASSERT(pr->ph->refcount == n_others);
		}
		MYASSERT(pr->ph->refcount == pr->ph->seencount);
		MYASSERT(!(pr->offset % VM_PAGE_SIZE)););
	ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp, vr, pr) == OK));
}

#define LRUCHECK lrucheck()

static void lrucheck(void)
{
	yielded_t *list;

	/* list is empty and ok if both ends point to null. */
	if(!lru_youngest && !lru_oldest)
		return;

	/* if not, both should point to something. */
	SLABSANE(lru_youngest);
	SLABSANE(lru_oldest);

	assert(!lru_youngest->younger);
	assert(!lru_oldest->older);

	for(list = lru_youngest; list; list = list->older) {
		SLABSANE(list);
		if(list->younger) {
			SLABSANE(list->younger);
			assert(list->younger->older == list);
		} else	assert(list == lru_youngest);
		if(list->older) {
			SLABSANE(list->older);
			assert(list->older->younger == list);
		} else	assert(list == lru_oldest);
	}
}

void blockstats(void)
{
	yielded_t *list;
	int blocks = 0;
	phys_bytes mem = 0;
	clock_t ticks;
	int s;

	s = getuptime(&ticks);

	assert(s == OK);

	LRUCHECK;

	for(list = lru_youngest; list; list = list->older) {
		mem += VM_PAGE_SIZE;
		blocks++;
	}

	if(blocks > 0)
		printf("%d blocks, %lukB; ", blocks, mem/1024);

	printmemstats();
}
#else
#define LRUCHECK
#endif

/*=========================================================================*
 *				map_ph_writept				   *
 *=========================================================================*/
static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr)
{
	int rw;
	struct phys_block *pb = pr->ph;

	assert(vr);
	assert(pr);
	assert(pb);

	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(pr->offset % VM_PAGE_SIZE));
	assert(pb->refcount > 0);

	if(pr_writable(vr, pr))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	if(pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
			pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw,
#if SANITYCHECKS
			!pr->written ? 0 :
#endif
			WMF_OVERWRITE) != OK) {
		printf("VM: map_writept: pt_writemap failed\n");
		return ENOMEM;
	}

#if SANITYCHECKS
	USE(pr, pr->written = 1;);
#endif

	return OK;
}

#define SLOT_FAIL ((vir_bytes) -1)
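
/* Find a gap of at least 'length' bytes within [minv, maxv) by walking
 * the region AVL downward from maxv. Candidate gaps are probed with
 * FREEVRANGE, which first tries to leave a one-page guard gap on either
 * side and falls back to the exact range otherwise; the slot is then
 * taken from the top of the free range.
 */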

/*===========================================================================*
 *				region_find_slot_range			     *
 *===========================================================================*/
static vir_bytes region_find_slot_range(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	struct vir_region *lastregion;
	vir_bytes startv = 0;
	int foundflag = 0;
	region_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Length must be reasonable. */
	assert(length > 0);

	/* Special case: allow caller to set maxv to 0 meaning 'I want
	 * it to be mapped in right here.'
	 */
	if(maxv == 0) {
		maxv = minv + length;

		/* Sanity check. */
		if(maxv <= minv) {
			printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
				minv, length);
			return SLOT_FAIL;
		}
	}

	/* Basic input sanity checks. */
	assert(!(length % VM_PAGE_SIZE));
	if(minv >= maxv) {
		printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
			minv, maxv, length);
	}

	assert(minv < maxv);

	if(minv + length > maxv)
		return SLOT_FAIL;

#define FREEVRANGE_TRY(rangestart, rangeend) {			\
	vir_bytes frstart = (rangestart), frend = (rangeend);	\
	frstart = MAX(frstart, minv);				\
	frend = MIN(frend, maxv);				\
	if(frend > frstart && (frend - frstart) >= length) {	\
		startv = frend-length;				\
		foundflag = 1;					\
	} }

#define FREEVRANGE(start, end) {					\
	assert(!foundflag);						\
	FREEVRANGE_TRY(((start)+VM_PAGE_SIZE), ((end)-VM_PAGE_SIZE));	\
	if(!foundflag) {						\
		FREEVRANGE_TRY((start), (end));				\
	} }

	/* find region after maxv. */
	region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_GREATER_EQUAL);
	lastregion = region_get_iter(&iter);

	if(!lastregion) {
		/* This is the free virtual address space after the last region. */
		region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_LESS);
		lastregion = region_get_iter(&iter);
		FREEVRANGE(lastregion ?
			lastregion->vaddr+lastregion->length : 0, VM_DATATOP);
	}

	if(!foundflag) {
		struct vir_region *vr;
		while((vr = region_get_iter(&iter)) && !foundflag) {
			struct vir_region *nextvr;
			region_decr_iter(&iter);
			nextvr = region_get_iter(&iter);
			FREEVRANGE(nextvr ? nextvr->vaddr+nextvr->length : 0,
				vr->vaddr);
		}
	}

	if(!foundflag) {
		return SLOT_FAIL;
	}

	/* However we got it, startv must be in the requested range. */
	assert(startv >= minv);
	assert(startv < maxv);
	assert(startv + length <= maxv);

	/* remember this position as a hint for next time. */
	vmp->vm_region_top = startv + length;

	return startv;
}

/*===========================================================================*
 *				region_find_slot			     *
 *===========================================================================*/
static vir_bytes region_find_slot(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	vir_bytes v, hint = vmp->vm_region_top;

	/* use the top of the last inserted region as a minv hint if
	 * possible. remember that a zero maxv is a special case.
	 */

	if(maxv && hint < maxv && hint >= minv) {
		v = region_find_slot_range(vmp, minv, hint, length);

		if(v != SLOT_FAIL)
			return v;
	}

	return region_find_slot_range(vmp, minv, maxv, length);
}
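
/* Convert a region length in bytes to a number of phys_region slots
 * (one slot per page).
 */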
static int phys_slot(vir_bytes len)
{
	assert(!(len % VM_PAGE_SIZE));
	return len / VM_PAGE_SIZE;
}

struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv, vir_bytes length,
	int flags, mem_type_t *memtype)
{
	struct vir_region *newregion;
	struct phys_region **physregions;
	static u32_t id;
	int slots = phys_slot(length);

	if(!(SLABALLOC(newregion))) {
		printf("vm: region_new: could not allocate\n");
		return NULL;
	}

	/* Fill in node details. */
USE(newregion,
	memset(newregion, 0, sizeof(*newregion));
	newregion->vaddr = startv;
	newregion->length = length;
	newregion->flags = flags;
	newregion->memtype = memtype;
	newregion->remaps = 0;
	newregion->id = id++;
	newregion->lower = newregion->higher = NULL;
	newregion->parent = vmp;);

	if(!(physregions = calloc(slots, sizeof(struct phys_region *)))) {
		printf("VM: region_new: allocating phys blocks failed\n");
		SLABFREE(newregion);
		return NULL;
	}

	USE(newregion, newregion->physblocks = physregions;);

	return newregion;
}
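
/* Sketch of a typical map_page_region() call, assuming an anonymous
 * writable mapping anywhere in the process address space (this is a
 * hypothetical illustration, not a verbatim caller; 'mem_type_anon' is
 * the anonymous memory type object defined in the memtype modules):
 *
 *	struct vir_region *vr;
 *	vr = map_page_region(vmp, VM_PAGE_SIZE, VM_DATATOP, VM_PAGE_SIZE,
 *		VR_ANON | VR_WRITABLE, 0, &mem_type_anon);
 *	if(!vr) return ENOMEM;
 */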

/*===========================================================================*
 *				map_page_region				     *
 *===========================================================================*/
struct vir_region *map_page_region(vmp, minv, maxv, length,
	flags, mapflags, memtype)
struct vmproc *vmp;
vir_bytes minv;
vir_bytes maxv;
vir_bytes length;
u32_t flags;
int mapflags;
mem_type_t *memtype;
{
	struct vir_region *newregion;
	vir_bytes startv;

	assert(!(length % VM_PAGE_SIZE));

	SANITYCHECK(SCL_FUNCTIONS);

	startv = region_find_slot(vmp, minv, maxv, length);
	if (startv == SLOT_FAIL)
		return NULL;

	/* Now we want a new region. */
	if(!(newregion = region_new(vmp, startv, length, flags, memtype))) {
		printf("VM: map_page_region: allocating region failed\n");
		return NULL;
	}

	/* If a new event is specified, invoke it. */
	if(newregion->memtype->ev_new) {
		if(newregion->memtype->ev_new(newregion) != OK) {
			/* ev_new will have freed and removed the region */
			return NULL;
		}
	}

	if(mapflags & MF_PREALLOC) {
		if(map_handle_memory(vmp, newregion, 0, length, 1) != OK) {
			printf("VM: map_page_region: prealloc failed\n");
			free(newregion->physblocks);
			USE(newregion,
				newregion->physblocks = NULL;);
			SLABFREE(newregion);
			return NULL;
		}
	}

	/* Pre-allocations should be uninitialized, but after that it's a
	 * different story.
	 */
	USE(newregion, newregion->flags &= ~VR_UNINITIALIZED;);

	/* Link it. */
	region_insert(&vmp->vm_regions_avl, newregion);

#if SANITYCHECKS
	assert(startv == newregion->vaddr);
	{
		struct vir_region *nextvr;
		if((nextvr = getnextvr(newregion))) {
			assert(newregion->vaddr < nextvr->vaddr);
		}
	}
#endif

	SANITYCHECK(SCL_FUNCTIONS);

	return newregion;
}
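
/* Unreference and free the phys_regions backing [start, start+len) of
 * 'region'; the virtual range itself remains part of the region.
 */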

/*===========================================================================*
 *				map_subfree				     *
 *===========================================================================*/
static int map_subfree(struct vir_region *region,
	vir_bytes start, vir_bytes len)
{
	struct phys_region *pr;
	vir_bytes end = start+len;
	vir_bytes voffset;

#if SANITYCHECKS
	SLABSANE(region);
	for(voffset = 0; voffset < region->length;
		voffset += VM_PAGE_SIZE) {
		struct phys_region *others;
		struct phys_block *pb;

		if(!(pr = physblock_get(region, voffset)))
			continue;

		pb = pr->ph;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
		}
	}
#endif

	for(voffset = start; voffset < end; voffset+=VM_PAGE_SIZE) {
		if(!(pr = physblock_get(region, voffset)))
			continue;
		assert(pr->offset >= start);
		assert(pr->offset < end);
		pb_unreferenced(region, pr, 1);
		SLABFREE(pr);
	}

	return OK;
}

/*===========================================================================*
 *				map_free				     *
 *===========================================================================*/
int map_free(struct vir_region *region)
{
	int r;

	if((r=map_subfree(region, 0, region->length)) != OK) {
		printf("VM: map_free: map_subfree failed\n");
		return r;
	}

	if(region->memtype->ev_delete)
		region->memtype->ev_delete(region);
	free(region->physblocks);
	region->physblocks = NULL;
	SLABFREE(region);

	return OK;
}
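
/* Total order on block ids for the yielded-block AVL trees: compare
 * owner endpoints first, then the 64-bit block id.
 */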

/*===========================================================================*
 *				yielded_block_cmp			     *
 *===========================================================================*/
int yielded_block_cmp(struct block_id *id1, struct block_id *id2)
{
	if(id1->owner < id2->owner)
		return -1;
	if(id1->owner > id2->owner)
		return 1;
	return cmp64(id1->id, id2->id);
}

/*===========================================================================*
 *				free_yielded_proc			     *
 *===========================================================================*/
static vir_bytes free_yielded_proc(struct vmproc *vmp)
{
	vir_bytes total = 0;
	int h;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Free associated regions. */
	for(h = 0; h < YIELD_HASHSIZE && vmp->vm_yielded > 0; h++) {
		yielded_t *yb;
		yielded_iter iter;
		yielded_avl *avl = &vm_yielded_blocks[h];
		yielded_start_iter_least(avl, &iter);
		while((yb = yielded_get_iter(&iter))) {
			yielded_t *next_yb;
			SLABSANE(yb);
			yielded_incr_iter(&iter);
			if(yb->id.owner != vmp->vm_endpoint)
				continue;
			next_yb = yielded_get_iter(&iter);
			total += freeyieldednode(yb, 1);
			/* the above removal invalidated our iter; restart it
			 * for the node we want to start at.
			 */
			if(!next_yb) break;
			yielded_start_iter(avl, &iter, next_yb->id, AVL_EQUAL);
			assert(yielded_get_iter(&iter) == next_yb);
		}
	}

	return total;
}
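
/* Drop one yielded block: unlink it from the LRU list and from its hash
 * bucket's AVL tree, optionally free the physical memory it holds, and
 * free the node itself. Returns the number of bytes this counts for
 * towards the caller's freeing target.
 */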
static phys_bytes freeyieldednode(yielded_t *node, int freemem)
{
	yielded_t *older, *younger, *removed;
	yielded_avl *avl;
	int p;

	SLABSANE(node);

	LRUCHECK;

	/* Update LRU. */

	younger = node->younger;
	older = node->older;

	if(younger) {
		SLABSANE(younger);
		assert(younger->older == node);
		USE(younger, younger->older = node->older;);
	} else {
		assert(node == lru_youngest);
		lru_youngest = node->older;
	}

	if(older) {
		SLABSANE(older);
		assert(older->younger == node);
		USE(older, older->younger = node->younger;);
	} else {
		assert(node == lru_oldest);
		lru_oldest = node->younger;
	}

	LRUCHECK;

	/* Update AVL. */

	if(vm_isokendpt(node->id.owner, &p) != OK)
		panic("out of date owner of yielded block %d", node->id.owner);
	avl = get_yielded_avl(node->id);
	removed = yielded_remove(avl, node->id);
	assert(removed == node);
	assert(vmproc[p].vm_yielded > 0);
	vmproc[p].vm_yielded--;

	/* Free associated memory if requested. */

	if(freemem) {
		free_mem(ABS2CLICK(node->physaddr), node->pages);
	}

	/* Free node. */
	SLABFREE(node);

	return VM_PAGE_SIZE;
}

/*========================================================================*
 *				free_yielded				  *
 *========================================================================*/
vir_bytes free_yielded(vir_bytes max_bytes)
{

/* PRIVATE yielded_t *lru_youngest = NULL, *lru_oldest = NULL; */
	vir_bytes freed = 0;
	int blocks = 0;

	while(freed < max_bytes && lru_oldest) {
		SLABSANE(lru_oldest);
		freed += freeyieldednode(lru_oldest, 1);
		blocks++;
	}

	return freed;
}

/*========================================================================*
 *				map_free_proc				  *
 *========================================================================*/
int map_free_proc(vmp)
struct vmproc *vmp;
{
	struct vir_region *r;

	while((r = region_search_root(&vmp->vm_regions_avl))) {
		SANITYCHECK(SCL_DETAIL);
#if SANITYCHECKS
		nocheck++;
#endif
		region_remove(&vmp->vm_regions_avl, r->vaddr); /* For sanity checks. */
		map_free(r);
#if SANITYCHECKS
		nocheck--;
#endif
		SANITYCHECK(SCL_DETAIL);
	}

	region_init(&vmp->vm_regions_avl);

	/* Free associated yielded blocks. */
	free_yielded_proc(vmp);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}

/*===========================================================================*
 *				map_lookup				     *
 *===========================================================================*/
struct vir_region *map_lookup(vmp, offset, physr)
struct vmproc *vmp;
vir_bytes offset;
struct phys_region **physr;
{
	struct vir_region *r;

	SANITYCHECK(SCL_FUNCTIONS);

#if SANITYCHECKS
	if(!region_search_root(&vmp->vm_regions_avl))
		panic("process has no regions: %d", vmp->vm_endpoint);
#endif

	if((r = region_search(&vmp->vm_regions_avl, offset, AVL_LESS_EQUAL))) {
		vir_bytes ph;
		if(offset >= r->vaddr && offset < r->vaddr + r->length) {
			ph = offset - r->vaddr;
			if(physr) {
				*physr = physblock_get(r, ph);
				if(*physr) assert((*physr)->offset == ph);
			}
			return r;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return NULL;
}
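
/* Translate region (VR_*) flags to physical allocation (PAF_*) flags. */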
u32_t vrallocflags(u32_t flags)
{
	u32_t allocflags = 0;

	if(flags & VR_PHYS64K)
		allocflags |= PAF_ALIGN64K;
	if(flags & VR_LOWER16MB)
		allocflags |= PAF_LOWER16MB;
	if(flags & VR_LOWER1MB)
		allocflags |= PAF_LOWER1MB;
	if(flags & VR_CONTIG)
		allocflags |= PAF_CONTIG;
	if(!(flags & VR_UNINITIALIZED))
		allocflags |= PAF_CLEAR;

	return allocflags;
}

/*===========================================================================*
 *				map_clone_ph_block			     *
 *===========================================================================*/
struct phys_region *map_clone_ph_block(vmp, region, ph)
struct vmproc *vmp;
struct vir_region *region;
struct phys_region *ph;
{
	vir_bytes offset;
	u32_t allocflags;
	phys_bytes physaddr;
	struct phys_region *newpr;
	int region_has_single_block;
	SANITYCHECK(SCL_FUNCTIONS);

	/* Warning: this function will free the passed
	 * phys_region *ph and replace it (in the same offset)
	 * with another! So both the pointer to it
	 * and any iterators over the phys_regions in the vir_region
	 * will be invalid on successful return. (Iterators over
	 * the vir_region could be invalid on unsuccessful return too.)
	 */

	/* This is only to be done if there is more than one copy. */
	assert(ph->ph->refcount > 1);

	/* This function takes a physical block, copies its contents
	 * into newly allocated memory, and replaces the single physical
	 * block by one or more physical blocks with refcount 1 with the
	 * same contents as the original. In other words, a fragmentable
	 * version of map_copy_ph_block().
	 */

	/* Remember where and how much. */
	offset = ph->offset;
	physaddr = ph->ph->phys;

	/* Now unlink the original physical block so we can replace
	 * it with new ones.
	 */

	SLABSANE(ph);
	SLABSANE(ph->ph);
	assert(ph->ph->refcount > 1);
	pb_unreferenced(region, ph, 1);
	SLABFREE(ph);

	SANITYCHECK(SCL_DETAIL);

	/* Put new free memory in. */
	allocflags = vrallocflags(region->flags | VR_UNINITIALIZED);
	region_has_single_block = (offset == 0 && region->length == VM_PAGE_SIZE);
	assert(region_has_single_block || !(allocflags & PAF_CONTIG));
	assert(!(allocflags & PAF_CLEAR));

	if(map_pf(vmp, region, offset, 1) != OK) {
		/* XXX original range now gone. */
		printf("VM: map_clone_ph_block: map_pf failed.\n");
		return NULL;
	}

	/* Copy the block to the new memory.
	 * Can only fail if map_new_physblock didn't do what we asked.
	 */
	if(copy_abs2region(physaddr, region, offset, VM_PAGE_SIZE) != OK)
		panic("copy_abs2region failed, no good reason for that");

	newpr = physblock_get(region, offset);
	assert(newpr);
	assert(newpr->offset == offset);

	SANITYCHECK(SCL_FUNCTIONS);

	return newpr;
}

/*===========================================================================*
 *				map_pf					     *
 *===========================================================================*/
int map_pf(vmp, region, offset, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset;
int write;
{
	struct phys_region *ph;
	int r = OK;

	offset -= offset % VM_PAGE_SIZE;

	assert(offset >= 0);
	assert(offset < region->length);

	assert(!(region->vaddr % VM_PAGE_SIZE));
	assert(!(write && !(region->flags & VR_WRITABLE)));

	SANITYCHECK(SCL_FUNCTIONS);

	if(!(ph = physblock_get(region, offset))) {
		struct phys_block *pb;

		/* New block. */

		if(!(pb = pb_new(MAP_NONE))) {
			printf("map_pf: pb_new failed\n");
			return ENOMEM;
		}

		if(!(ph = pb_reference(pb, offset, region))) {
			printf("map_pf: pb_reference failed\n");
			pb_free(pb);
			return ENOMEM;
		}
	}

	assert(ph);
	assert(ph->ph);

	/* If we're writing and the block is already
	 * writable, nothing to do.
	 */

	assert(region->memtype->writable);

	if(!write || !region->memtype->writable(ph)) {
		assert(region->memtype->ev_pagefault);
		assert(ph->ph);

		if((r = region->memtype->ev_pagefault(vmp,
			region, ph, write)) == SUSPEND) {
			panic("map_pf: memtype->ev_pagefault returned SUSPEND\n");
			return SUSPEND;
		}

		if(r != OK) {
			printf("map_pf: memtype->ev_pagefault failed\n");
			if(ph)
				pb_unreferenced(region, ph, 1);
			return r;
		}

		assert(ph);
		assert(ph->ph);
	}

	assert(ph->ph);

	if((r = map_ph_writept(vmp, region, ph)) != OK) {
		printf("map_pf: writept failed\n");
		return r;
	}

	SANITYCHECK(SCL_FUNCTIONS);

#if SANITYCHECKS
	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset,
		VM_PAGE_SIZE, write)) {
		panic("map_pf: pt_checkrange failed: %d", r);
	}
#endif

	return r;
}
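
/* Fault in every page of [start_offset, start_offset+length) by calling
 * map_pf() on each, so the whole range is backed by physical memory
 * (and writable, if 'write' is set) afterwards.
 */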
int map_handle_memory(vmp, region, start_offset, length, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes start_offset;
vir_bytes length;
int write;
{
	vir_bytes offset, lim;
	int r;

	assert(length > 0);
	lim = start_offset + length;
	assert(lim > start_offset);

	for(offset = start_offset; offset < lim; offset += VM_PAGE_SIZE)
		if((r = map_pf(vmp, region, offset, write)) != OK)
			return r;

	return OK;
}

/*===========================================================================*
 *				map_pin_memory				     *
 *===========================================================================*/
int map_pin_memory(struct vmproc *vmp)
{
	struct vir_region *vr;
	int r;
	region_iter iter;
	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	/* Scan all memory regions. */
	while((vr = region_get_iter(&iter))) {
		/* Make sure region is mapped to physical memory and writable.*/
		r = map_handle_memory(vmp, vr, 0, vr->length, 1);
		if(r != OK) {
			panic("map_pin_memory: map_handle_memory failed: %d", r);
		}
		region_incr_iter(&iter);
	}
	return OK;
}

/*===========================================================================*
 *				map_copy_region				     *
 *===========================================================================*/
static struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region *vr)
{
	/* map_copy_region creates a complete copy of the vir_region
	 * data structure, linking in the same phys_blocks directly
	 * (each block gets a new reference through pb_reference()),
	 * but all in limbo, i.e., the caller has to link the vir_region
	 * to a process. The reason for this is to keep the sanity
	 * checks working within this function.
	 */
	struct vir_region *newvr;
	struct phys_region *ph;
	int r;
#if SANITYCHECKS
	int cr;
	cr = physregions(vr);
#endif
	vir_bytes p;

	if(!(newvr = region_new(vr->parent, vr->vaddr, vr->length, vr->flags, vr->memtype)))
		return NULL;

	if(vr->memtype->ev_copy && (r=vr->memtype->ev_copy(vr, newvr)) != OK) {
		map_free(newvr);
		printf("VM: memtype-specific copy failed (%d)\n", r);
		return NULL;
	}

	for(p = 0; p < phys_slot(vr->length); p++) {
		struct phys_region *newph;
		if(!(ph = physblock_get(vr, p*VM_PAGE_SIZE))) continue;
		newph = pb_reference(ph->ph, ph->offset, newvr);

		if(!newph) { map_free(newvr); return NULL; }

#if SANITYCHECKS
		USE(newph, newph->written = 0;);
		assert(physregions(vr) == cr);
#endif
	}

#if SANITYCHECKS
	assert(physregions(vr) == physregions(newvr));
#endif

	return newvr;
}

/*===========================================================================*
 *				copy_abs2region				     *
 *===========================================================================*/
int copy_abs2region(phys_bytes abs, struct vir_region *destregion,
	phys_bytes offset, phys_bytes len)
{
	assert(destregion);
	assert(destregion->physblocks);
	while(len > 0) {
		phys_bytes sublen, suboffset;
		struct phys_region *ph;
		assert(destregion);
		assert(destregion->physblocks);
		if(!(ph = physblock_get(destregion, offset))) {
			printf("VM: copy_abs2region: no phys region found (1).\n");
			return EFAULT;
		}
		assert(ph->offset <= offset);
		if(ph->offset+VM_PAGE_SIZE <= offset) {
			printf("VM: copy_abs2region: no phys region found (2).\n");
			return EFAULT;
		}
		suboffset = offset - ph->offset;
		assert(suboffset < VM_PAGE_SIZE);
		sublen = len;
		if(sublen > VM_PAGE_SIZE - suboffset)
			sublen = VM_PAGE_SIZE - suboffset;
		assert(suboffset + sublen <= VM_PAGE_SIZE);
		if(ph->ph->refcount != 1) {
			printf("VM: copy_abs2region: refcount not 1.\n");
			return EFAULT;
		}

		if(sys_abscopy(abs, ph->ph->phys + suboffset, sublen) != OK) {
			printf("VM: copy_abs2region: abscopy failed.\n");
			return EFAULT;
		}
		abs += sublen;
		offset += sublen;
		len -= sublen;
	}

	return OK;
}

/*=========================================================================*
 *				map_writept				   *
 *=========================================================================*/
int map_writept(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *ph;
	int r;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		vir_bytes p;
		for(p = 0; p < vr->length; p += VM_PAGE_SIZE) {
			if(!(ph = physblock_get(vr, p))) continue;

			if((r=map_ph_writept(vmp, vr, ph)) != OK) {
				printf("VM: map_writept: failed\n");
				return r;
			}
		}
		region_incr_iter(&v_iter);
	}

	return OK;
}

/*========================================================================*
 *				map_proc_copy				  *
 *========================================================================*/
int map_proc_copy(dst, src)
struct vmproc *dst;
struct vmproc *src;
{
/* Copy all the memory regions from the src process to the dst process. */
	region_init(&dst->vm_regions_avl);

	return map_proc_copy_from(dst, src, NULL);
}

/*========================================================================*
 *				map_proc_copy_from			  *
 *========================================================================*/
int map_proc_copy_from(dst, src, start_src_vr)
struct vmproc *dst;
struct vmproc *src;
struct vir_region *start_src_vr;
{
	struct vir_region *vr;
	region_iter v_iter;

	if(!start_src_vr)
		start_src_vr = region_search_least(&src->vm_regions_avl);

	assert(start_src_vr);
	assert(start_src_vr->parent == src);
	region_start_iter(&src->vm_regions_avl, &v_iter,
		start_src_vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter) == start_src_vr);

	/* Copy source regions after the destination's last region (if any). */

	SANITYCHECK(SCL_FUNCTIONS);

	while((vr = region_get_iter(&v_iter))) {
		struct vir_region *newvr;
		if(!(newvr = map_copy_region(dst, vr))) {
			map_free_proc(dst);
			return ENOMEM;
		}
		USE(newvr, newvr->parent = dst;);
		region_insert(&dst->vm_regions_avl, newvr);
		assert(vr->length == newvr->length);

#if SANITYCHECKS
	{
		vir_bytes vaddr;
		struct phys_region *orig_ph, *new_ph;
		assert(vr->physblocks != newvr->physblocks);
		for(vaddr = 0; vaddr < vr->length; vaddr += VM_PAGE_SIZE) {
			orig_ph = physblock_get(vr, vaddr);
			new_ph = physblock_get(newvr, vaddr);
			if(!orig_ph) { assert(!new_ph); continue;}
			assert(new_ph);
			assert(orig_ph != new_ph);
			assert(orig_ph->ph == new_ph->ph);
		}
	}
#endif
		region_incr_iter(&v_iter);
	}

	map_writept(src);
	map_writept(dst);

	SANITYCHECK(SCL_FUNCTIONS);
	return OK;
}
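
/* Grow the region that lies below 'v' so that it covers addresses up to
 * the page-rounded 'v': enlarge its physblocks slot array and let the
 * memory type's ev_resize handler do the type-specific work.
 */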
int map_region_extend_upto_v(struct vmproc *vmp, vir_bytes v)
{
	vir_bytes offset = v;
	struct vir_region *vr, *nextvr;
	struct phys_region **newpr;
	int newslots, prevslots, addedslots;

	offset = roundup(offset, VM_PAGE_SIZE);

	if(!(vr = region_search(&vmp->vm_regions_avl, offset, AVL_LESS))) {
		printf("VM: nothing to extend\n");
		return ENOMEM;
	}

	if(vr->vaddr + vr->length >= v) return OK;

	assert(vr->vaddr <= offset);
	newslots = phys_slot(offset - vr->vaddr);
	prevslots = phys_slot(vr->length);
	assert(newslots >= prevslots);
	addedslots = newslots - prevslots;

	if(!(newpr = realloc(vr->physblocks,
		newslots * sizeof(struct phys_region *)))) {
		printf("VM: map_region_extend_upto_v: realloc failed\n");
		return ENOMEM;
	}

	vr->physblocks = newpr;
	memset(vr->physblocks + prevslots, 0,
		addedslots * sizeof(struct phys_region *));

	if((nextvr = getnextvr(vr))) {
		assert(offset <= nextvr->vaddr);
	}

	if(nextvr && nextvr->vaddr < offset) {
		printf("VM: can't grow into next region\n");
		return ENOMEM;
	}

	if(!vr->memtype->ev_resize) {
		printf("VM: can't resize this type of memory\n");
		return ENOMEM;
	}

	return vr->memtype->ev_resize(vmp, vr, offset - vr->vaddr);
}

/*========================================================================*
 *				map_unmap_region			  *
 *========================================================================*/
int map_unmap_region(struct vmproc *vmp, struct vir_region *r,
	vir_bytes offset, vir_bytes len)
{
/* Shrink the region by 'len' bytes; the freed range [offset, offset+len)
 * must lie at the start or the end of the region. Unreference memory it
 * used to reference if any.
 */
	vir_bytes regionstart;
	int freeslots = phys_slot(len);

	SANITYCHECK(SCL_FUNCTIONS);

	if(offset+len > r->length || (len % VM_PAGE_SIZE)) {
		printf("VM: bogus length 0x%lx\n", len);
		return EINVAL;
	}

	regionstart = r->vaddr + offset;

	/* unreference its memory */
	map_subfree(r, offset, len);

	/* if unmap was at start/end of this region, it actually shrinks */
	if(offset == 0) {
		struct phys_region *pr;
		vir_bytes voffset;
		int remslots;

		region_remove(&vmp->vm_regions_avl, r->vaddr);

		USE(r,
		r->vaddr += len;
		r->length -= len;);

		remslots = phys_slot(r->length);

		region_insert(&vmp->vm_regions_avl, r);

		/* vaddr has increased; to make all the phys_regions
		 * point to the same addresses, make them shrink by the
		 * same amount.
		 */
		for(voffset = offset; voffset < r->length;
			voffset += VM_PAGE_SIZE) {
			if(!(pr = physblock_get(r, voffset))) continue;
			assert(pr->offset >= offset);
			USE(pr, pr->offset -= len;);
		}
		if(remslots)
			memmove(r->physblocks, r->physblocks + freeslots,
				remslots * sizeof(struct phys_region *));
	} else if(offset + len == r->length) {
		assert(len <= r->length);
		r->length -= len;
	}

	if(r->length == 0) {
		/* Whole region disappears. Unlink and free it. */
		region_remove(&vmp->vm_regions_avl, r->vaddr);
		map_free(r);
	}

	SANITYCHECK(SCL_DETAIL);

	if(pt_writemap(vmp, &vmp->vm_pt, regionstart,
		MAP_NONE, len, 0, WMF_OVERWRITE) != OK) {
		printf("VM: map_unmap_region: pt_writemap failed\n");
		return ENOMEM;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}

/*========================================================================*
 *				map_get_phys				  *
 *========================================================================*/
int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
{
	struct vir_region *vr;

	if (!(vr = map_lookup(vmp, addr, NULL)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!vr->memtype->regionid)
		return EINVAL;

	if(r)
		*r = vr->memtype->regionid(vr);

	return OK;
}

/*========================================================================*
 *				map_get_ref				  *
 *========================================================================*/
int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
{
	struct vir_region *vr;

	if (!(vr = map_lookup(vmp, addr, NULL)) ||
		(vr->vaddr != addr) || !vr->memtype->refcount)
		return EINVAL;

	if (cnt)
		*cnt = vr->memtype->refcount(vr);

	return OK;
}

/*========================================================================*
 *				get_stats_info				  *
 *========================================================================*/
void get_stats_info(struct vm_stats_info *vsi)
{
	yielded_t *yb;

	vsi->vsi_cached = 0L;

	for(yb = lru_youngest; yb; yb = yb->older)
		vsi->vsi_cached++;
}

void get_usage_info_kernel(struct vm_usage_info *vui)
{
	memset(vui, 0, sizeof(*vui));
	vui->vui_total = kernel_boot_info.kernel_allocated_bytes;
}

static void get_usage_info_vm(struct vm_usage_info *vui)
{
	memset(vui, 0, sizeof(*vui));
	vui->vui_total = kernel_boot_info.vm_allocated_bytes +
		get_vm_self_pages() * VM_PAGE_SIZE;
}

/*========================================================================*
 *				get_usage_info				  *
 *========================================================================*/
void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
{
	struct vir_region *vr;
	struct phys_region *ph;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);
	vir_bytes voffset;

	memset(vui, 0, sizeof(*vui));

	if(vmp->vm_endpoint == VM_PROC_NR) {
		get_usage_info_vm(vui);
		return;
	}

	if(vmp->vm_endpoint < 0) {
		get_usage_info_kernel(vui);
		return;
	}

	while((vr = region_get_iter(&v_iter))) {
		for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
			if(!(ph = physblock_get(vr, voffset))) continue;
			/* All present pages are counted towards the total. */
			vui->vui_total += VM_PAGE_SIZE;

			if (ph->ph->refcount > 1) {
				/* Any page with a refcount > 1 is common. */
				vui->vui_common += VM_PAGE_SIZE;

				/* Any common, non-COW page is shared. */
				if (vr->flags & VR_SHARED)
					vui->vui_shared += VM_PAGE_SIZE;
			}
		}
		region_incr_iter(&v_iter);
	}
}

/*===========================================================================*
 *				get_region_info				     *
 *===========================================================================*/
int get_region_info(struct vmproc *vmp, struct vm_region_info *vri,
	int max, vir_bytes *nextp)
{
	struct vir_region *vr;
	vir_bytes next;
	int count;
	region_iter v_iter;

	next = *nextp;

	if (!max) return 0;

	region_start_iter(&vmp->vm_regions_avl, &v_iter, next, AVL_GREATER_EQUAL);
	if(!(vr = region_get_iter(&v_iter))) return 0;

	for(count = 0; (vr = region_get_iter(&v_iter)) && count < max;
		region_incr_iter(&v_iter)) {
		struct phys_region *ph1 = NULL, *ph2 = NULL;
		vir_bytes voffset;

		/* Report part of the region that's actually in use. */

		/* Get first and last phys_regions, if any */
		for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
			struct phys_region *ph;
			if(!(ph = physblock_get(vr, voffset))) continue;
			if(!ph1) ph1 = ph;
			ph2 = ph;
		}
		if(!ph1 || !ph2) { assert(!ph1 && !ph2); continue; }

		/* Report start+length of region starting from lowest use. */
		vri->vri_addr = vr->vaddr + ph1->offset;
		vri->vri_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
		vri->vri_length = ph2->offset + VM_PAGE_SIZE - ph1->offset;

		/* "AND" the provided protection with per-page protection. */
		if (!(vr->flags & VR_WRITABLE))
			vri->vri_prot &= ~PROT_WRITE;

		next = vr->vaddr + vr->length;

		count++;
		vri++;
	}

	*nextp = next;
	return count;
}

/*========================================================================*
 *				printregionstats			  *
 *========================================================================*/
void printregionstats(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *pr;
	vir_bytes used = 0, weighted = 0;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		vir_bytes voffset;
		region_incr_iter(&v_iter);
		if(vr->flags & VR_DIRECT)
			continue;
		for(voffset = 0; voffset < vr->length; voffset+=VM_PAGE_SIZE) {
			if(!(pr = physblock_get(vr, voffset))) continue;
			used += VM_PAGE_SIZE;
			weighted += VM_PAGE_SIZE / pr->ph->refcount;
		}
	}

	printf("%6lukB  %6lukB\n", used/1024, weighted/1024);

	return;
}

/*===========================================================================*
 *				get_clean_phys_region			     *
 *===========================================================================*/
static struct phys_region *
get_clean_phys_region(struct vmproc *vmp, vir_bytes vaddr, struct vir_region **ret_region)
{
	struct vir_region *region;
	vir_bytes mapaddr;
	struct phys_region *ph;

	mapaddr = vaddr;

	if(!(region = map_lookup(vmp, mapaddr, &ph)) || !ph) {
		printf("VM: get_clean_phys_region: 0x%lx not found\n", vaddr);
		return NULL;
	}

	assert(mapaddr >= region->vaddr);
	assert(mapaddr < region->vaddr + region->length);

	/* If it's mapped more than once, make a copy. */
	assert(ph->ph->refcount > 0);
	if(ph->ph->refcount > 1) {
		if(!(ph = map_clone_ph_block(vmp, region,
			ph))) {
			printf("VM: get_clean_phys_region: ph copy failed\n");
			return NULL;
		}
	}

	assert(ph->ph->refcount == 1);

	*ret_region = region;

	return ph;
}
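
/* Reclaim a previously yielded block: move its physical pages back under
 * the caller's virtual range at 'vaddr' and forget the yielded node.
 * Each target page must be mapped and exclusively owned (refcount 1);
 * get_clean_phys_region() makes a private copy first if needed.
 */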
static int getblock(struct vmproc *vmp, u64_t id, vir_bytes vaddr, int pages)
{
	yielded_t *yb;
	struct phys_region *ph;
	struct vir_region *region;
	yielded_avl *avl;
	block_id_t blockid;
	phys_bytes phaddr;
	int p;

	/* Try to get the yielded block */
	blockid.owner = vmp->vm_endpoint;
	blockid.id = id;
	avl = get_yielded_avl(blockid);
	if(!(yb = yielded_search(avl, blockid, AVL_EQUAL))) {
		return ESRCH;
	}

	if(yb->pages != pages) {
		printf("VM: getblock: length mismatch (%d != %d)\n",
			pages, yb->pages);
		return EFAULT;
	}

	phaddr = yb->physaddr;

	for(p = 0; p < pages; p++) {
		/* Get the intended phys region, make sure refcount is 1. */
		if(!(ph = get_clean_phys_region(vmp, vaddr, &region))) {
			printf("VM: getblock: not found for %d\n", vmp->vm_endpoint);
			return EINVAL;
		}

		assert(ph->ph->refcount == 1);

		/* Free the block that is currently there. */
		free_mem(ABS2CLICK(ph->ph->phys), 1);

		/* Set the phys block to new addr and update pagetable. */
		USE(ph->ph, ph->ph->phys = phaddr;);
		if(map_ph_writept(vmp, region, ph) != OK) {
			/* Presumably it was mapped, so there is no reason
			 * updating should fail.
			 */
			panic("do_get_block: couldn't write pt");
		}

		vaddr += VM_PAGE_SIZE;
		phaddr += VM_PAGE_SIZE;
	}

	/* Forget about the yielded block and free the struct. */
	freeyieldednode(yb, 0);

	return OK;
}
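
/* Yield a physically contiguous, exclusively mapped, anonymous range of
 * 'pages' pages at 'vaddr': record its physical pages under (owner, id)
 * in the yielded AVL and LRU structures, and give the caller's virtual
 * range fresh zeroed memory instead. VM may later reclaim the yielded
 * pages when memory runs low (see free_yielded()).
 */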
static int yieldblock(struct vmproc *vmp, u64_t id,
	vir_bytes vaddr, yielded_t **retyb, int pages)
{
	yielded_t *newyb;
	vir_bytes mem_clicks, v, p, new_phaddr;
	struct vir_region *region;
	struct phys_region *ph = NULL, *prev_ph = NULL, *first_ph = NULL;
	yielded_avl *avl;
	block_id_t blockid;

	/* Makes no sense if yielded block ID already exists, and
	 * is likely a serious bug in the caller.
	 */
	blockid.id = id;
	blockid.owner = vmp->vm_endpoint;
	avl = get_yielded_avl(blockid);
	if(yielded_search(avl, blockid, AVL_EQUAL)) {
		printf("!");
		return EINVAL;
	}

	if((vaddr % VM_PAGE_SIZE) || pages < 1) return EFAULT;

	v = vaddr;
	for(p = 0; p < pages; p++) {
		if(!(region = map_lookup(vmp, v, &ph)) || !ph) {
			printf("VM: do_yield_block: not found for %d\n",
				vmp->vm_endpoint);
			return EINVAL;
		}
		if(!(region->flags & VR_ANON)) {
			printf("VM: yieldblock: non-anon 0x%lx\n", v);
			return EFAULT;
		}
		if(ph->ph->refcount != 1) {
			printf("VM: do_yield_block: mapped not once for %d\n",
				vmp->vm_endpoint);
			return EFAULT;
		}
		if(prev_ph) {
			if(ph->ph->phys != prev_ph->ph->phys + VM_PAGE_SIZE) {
				printf("VM: physically discontiguous yield\n");
				return EINVAL;
			}
		}
		prev_ph = ph;
		if(!first_ph) first_ph = ph;
		v += VM_PAGE_SIZE;
	}

	/* Make a new block to record the yielding in. */
	if(!SLABALLOC(newyb)) {
		return ENOMEM;
	}

	assert(!(ph->ph->phys % VM_PAGE_SIZE));

	if((mem_clicks = alloc_mem(pages, PAF_CLEAR)) == NO_MEM) {
		SLABFREE(newyb);
		return ENOMEM;
	}

	/* Update yielded block info. */
	USE(newyb,
		newyb->id = blockid;
		newyb->physaddr = first_ph->ph->phys;
		newyb->pages = pages;
		newyb->younger = NULL;);

	new_phaddr = CLICK2ABS(mem_clicks);

	/* Set new phys block to new addr and update pagetable. */
	v = vaddr;
	for(p = 0; p < pages; p++) {
		region = map_lookup(vmp, v, &ph);
		assert(region && ph);
		assert(ph->ph->refcount == 1);
		USE(ph->ph,
			ph->ph->phys = new_phaddr;);
		if(map_ph_writept(vmp, region, ph) != OK) {
			/* Presumably it was mapped, so there is no reason
			 * updating should fail.
			 */
			panic("yield_block: couldn't write pt");
		}
		v += VM_PAGE_SIZE;
		new_phaddr += VM_PAGE_SIZE;
	}

	/* Remember yielded block. */

	yielded_insert(avl, newyb);
	vmp->vm_yielded++;

	/* Add to LRU list too. It's the youngest block. */
	LRUCHECK;

	if(lru_youngest) {
		USE(lru_youngest,
			lru_youngest->younger = newyb;);
	} else {
		lru_oldest = newyb;
	}

	USE(newyb,
		newyb->older = lru_youngest;);

	lru_youngest = newyb;

	LRUCHECK;

	if(retyb)
		*retyb = newyb;

	return OK;
}

/*===========================================================================*
 *				do_forgetblocks				     *
 *===========================================================================*/
int do_forgetblocks(message *m)
{
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_forgetblocks: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	free_yielded_proc(vmp);

	return OK;
}

/*===========================================================================*
 *				do_forgetblock				     *
 *===========================================================================*/
int do_forgetblock(message *m)
{
	int n;
	struct vmproc *vmp;
	endpoint_t caller = m->m_source;
	yielded_t *yb;
	u64_t id;
	block_id_t blockid;
	yielded_avl *avl;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_forgetblock: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	id = make64(m->VMFB_IDLO, m->VMFB_IDHI);

	blockid.id = id;
	blockid.owner = vmp->vm_endpoint;
	avl = get_yielded_avl(blockid);
	if((yb = yielded_search(avl, blockid, AVL_EQUAL))) {
		freeyieldednode(yb, 1);
	}

	return OK;
}

/*===========================================================================*
 *				do_yieldblockgetblock			     *
 *===========================================================================*/
int do_yieldblockgetblock(message *m)
{
	u64_t yieldid, getid;
	int n;
	endpoint_t caller = m->m_source;
	struct vmproc *vmp;
	yielded_t *yb = NULL;
	int r = ESRCH;
	int pages;

	if(vm_isokendpt(caller, &n) != OK)
		panic("do_yieldblockgetblock: message from strange source: %d",
			m->m_source);

	vmp = &vmproc[n];

	pages = m->VMYBGB_LEN / VM_PAGE_SIZE;

	if((m->VMYBGB_LEN % VM_PAGE_SIZE) || pages < 1) {
		static int printed;
		if(!printed) {
			printed = 1;
			printf("vm: non-page-aligned or short block length\n");
		}
		return EFAULT;
	}

	yieldid = make64(m->VMYBGB_YIELDIDLO, m->VMYBGB_YIELDIDHI);
	getid = make64(m->VMYBGB_GETIDLO, m->VMYBGB_GETIDHI);

	if(cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to yield. */
		yieldblock(vmp, yieldid, (vir_bytes) m->VMYBGB_VADDR, &yb,
			pages);
	}

	if(cmp64(getid, VM_BLOCKID_NONE) != 0) {
		/* A block was given to get. */
		r = getblock(vmp, getid, (vir_bytes) m->VMYBGB_VADDR, pages);
	}

	return r;
}
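
/* Point every region of 'vmp' back at 'vmp' as its parent, e.g. after
 * the vmproc slot itself has been moved or copied.
 */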
void map_setparent(struct vmproc *vmp)
{
	region_iter iter;
	struct vir_region *vr;
	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		USE(vr, vr->parent = vmp;);
		region_incr_iter(&iter);
	}
}
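
/* Count how many pages of 'vr' currently have a phys_region attached. */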
int physregions(struct vir_region *vr)
{
	int n = 0;
	vir_bytes voffset;
	for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
		if(physblock_get(vr, voffset))
			n++;
	}
	return n;
}