tty: try more often to get the config byte.
[minix.git] / servers / vm / region.c
blob429df185f40e6ae2e7e1cb533af7aa0a76b4fa78
2 #define _SYSTEM 1
4 #include <minix/com.h>
5 #include <minix/callnr.h>
6 #include <minix/type.h>
7 #include <minix/config.h>
8 #include <minix/const.h>
9 #include <minix/sysutil.h>
10 #include <minix/syslib.h>
11 #include <minix/debug.h>
12 #include <minix/bitmap.h>
14 #include <sys/mman.h>
16 #include <limits.h>
17 #include <string.h>
18 #include <errno.h>
19 #include <assert.h>
20 #include <stdint.h>
21 #include <memory.h>
23 #include "vm.h"
24 #include "proto.h"
25 #include "util.h"
26 #include "glo.h"
27 #include "region.h"
28 #include "sanitycheck.h"
29 #include "physravl.h"
30 #include "memlist.h"
32 /* Should a physblock be mapped writable? */
33 #define WRITABLE(r, pb) \
34 (((r)->flags & (VR_DIRECT | VR_SHARED)) || \
35 (((r)->flags & VR_WRITABLE) && (pb)->refcount == 1))
37 FORWARD _PROTOTYPE(int map_new_physblock, (struct vmproc *vmp,
38 struct vir_region *region, vir_bytes offset, vir_bytes length,
39 phys_bytes what, u32_t allocflags, int written));
41 FORWARD _PROTOTYPE(int map_ph_writept, (struct vmproc *vmp, struct vir_region *vr,
42 struct phys_region *pr));
44 FORWARD _PROTOTYPE(struct vir_region *map_copy_region, (struct vmproc *vmp, struct vir_region *vr));
46 FORWARD _PROTOTYPE(struct phys_region *map_clone_ph_block, (struct vmproc *vmp,
47 struct vir_region *region, struct phys_region *ph, physr_iter *iter));
49 PRIVATE char *map_name(struct vir_region *vr)
51 static char name[100];
52 char *typename, *tag;
53 int type = vr->flags & (VR_ANON|VR_DIRECT);
54 switch(type) {
55 case VR_ANON:
56 typename = "anonymous";
57 break;
58 case VR_DIRECT:
59 typename = "direct";
60 break;
61 default:
62 panic("unknown mapping type: %d", type);
65 switch(vr->tag) {
66 case VRT_TEXT:
67 tag = "text";
68 break;
69 case VRT_STACK:
70 tag = "stack";
71 break;
72 case VRT_HEAP:
73 tag = "heap";
74 break;
75 case VRT_NONE:
76 tag = "untagged";
77 break;
78 default:
79 tag = "unknown tag value";
80 break;
83 sprintf(name, "%s, %s", typename, tag);
85 return name;
/* Print one region's address, length and all of its physical blocks to the
 * console (debug aid).
 */
PUBLIC void map_printregion(struct vmproc *vmp, struct vir_region *vr)
{
	physr_iter iter;
	struct phys_region *ph;
	printf("map_printmap: map_name: %s\n", map_name(vr));
	printf("\t%s (len 0x%lx, %dkB), %s\n",
		arch_map2str(vmp, vr->vaddr), vr->length,
		vr->length/1024, map_name(vr));
	printf("\t\tphysblocks:\n");
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		/* For each phys_region: virtual address, block refcount,
		 * physical address and length.
		 */
		printf("\t\t@ %s (refs %d): phys 0x%lx len 0x%lx\n",
			arch_map2str(vmp, vr->vaddr + ph->offset),
			ph->ph->refcount, ph->ph->phys, ph->ph->length);
		physr_incr_iter(&iter);
	}
}
106 /*===========================================================================*
107 * map_printmap *
108 *===========================================================================*/
109 PUBLIC void map_printmap(vmp)
110 struct vmproc *vmp;
112 struct vir_region *vr;
114 printf("memory regions in process %d:\n", vmp->vm_endpoint);
115 for(vr = vmp->vm_regions; vr; vr = vr->next) {
116 map_printregion(vmp, vr);
121 #if SANITYCHECKS
124 /*===========================================================================*
125 * map_sanitycheck_pt *
126 *===========================================================================*/
/* Verify (without modifying) that the page table entries for one
 * phys_region match VM's idea of them: present, user, and writable
 * exactly when WRITABLE() says so. Returns OK or the pt_writemap
 * verification error.
 */
PRIVATE int map_sanitycheck_pt(struct vmproc *vmp,
	struct vir_region *vr, struct phys_region *pr)
{
	struct phys_block *pb = pr->ph;
	int rw;
	int r;

	/* A process without a page table yet cannot be checked. */
	if(!(vmp->vm_flags & VMF_HASPT))
		return OK;

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = 0;

	/* WMF_VERIFY: compare the existing mapping instead of writing it. */
	r = pt_writemap(&vmp->vm_pt, vr->vaddr + pr->offset,
		pb->phys, pb->length, PTF_PRESENT | PTF_USER | rw, WMF_VERIFY);

	if(r != OK) {
		printf("proc %d phys_region 0x%lx sanity check failed\n",
			vmp->vm_endpoint, pr->offset);
		map_printregion(vmp, vr);
	}

	return r;
}
154 /*===========================================================================*
155 * map_sanitycheck *
156 *===========================================================================*/
/* Cross-check all regions of all in-use processes: slab sanity of the
 * data structures, refcount vs. the number of phys_regions actually
 * referencing each phys_block, page alignment, region ordering, and
 * agreement between VM state and the hardware page tables.
 */
PUBLIC void map_sanitycheck(char *file, int line)
{
	struct vmproc *vmp;

/* Macro for looping over all physical blocks of all regions of
 * all processes.
 */
#define ALLREGIONS(regioncode, physcode)			\
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) {	\
		struct vir_region *vr;				\
		if(!(vmp->vm_flags & VMF_INUSE))		\
			continue;				\
		for(vr = vmp->vm_regions; vr; vr = vr->next) {	\
			physr_iter iter;			\
			struct phys_region *pr;			\
			regioncode;				\
			physr_start_iter_least(vr->phys, &iter);	\
			while((pr = physr_get_iter(&iter))) {	\
				physcode;			\
				physr_incr_iter(&iter);		\
			}					\
		}						\
	}

#define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
	/* Basic pointers check. */
	ALLREGIONS(MYSLABSANE(vr),MYSLABSANE(pr); MYSLABSANE(pr->ph);MYSLABSANE(pr->parent));
	ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr->parent == vr););

	/* Do counting for consistency check. */
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
		if(pr->ph->seencount == 1) {
			/* First sighting of this phys_block: record its
			 * physical range as used exactly once.
			 */
			MYASSERT(usedpages_add(pr->ph->phys,
				pr->ph->length) == OK);
		}
	);

	/* Do consistency check. */
	ALLREGIONS(if(vr->next) {
			/* Regions must be sorted and non-overlapping. */
			MYASSERT(vr->vaddr < vr->next->vaddr);
			MYASSERT(vr->vaddr + vr->length <= vr->next->vaddr);
		}
		MYASSERT(!(vr->vaddr % VM_PAGE_SIZE));,
		if(pr->ph->refcount != pr->ph->seencount) {
			map_printmap(vmp);
			printf("ph in vr 0x%lx: 0x%lx-0x%lx refcount %d "
				"but seencount %lu\n",
				vr, pr->offset,
				pr->offset + pr->ph->length,
				pr->ph->refcount, pr->ph->seencount);
		}
		{
			/* The refcount must equal the length of the block's
			 * list of referencing phys_regions.
			 */
			int n_others = 0;
			struct phys_region *others;
			if(pr->ph->refcount > 0) {
				MYASSERT(pr->ph->firstregion);
				if(pr->ph->refcount == 1) {
					MYASSERT(pr->ph->firstregion == pr);
				}
			} else {
				MYASSERT(!pr->ph->firstregion);
			}
			for(others = pr->ph->firstregion; others;
				others = others->next_ph_list) {
				MYSLABSANE(others);
				MYASSERT(others->ph == pr->ph);
				n_others++;
			}
			MYASSERT(pr->ph->refcount == n_others);
		}
		MYASSERT(pr->ph->refcount == pr->ph->seencount);
		MYASSERT(!(pr->offset % VM_PAGE_SIZE));
		MYASSERT(!(pr->ph->length % VM_PAGE_SIZE)););
	ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp, vr, pr) == OK));
}
233 #endif
236 /*=========================================================================*
237 * map_ph_writept *
238 *=========================================================================*/
/* Write (or overwrite) the page table entries covering one phys_region,
 * marking them writable only if WRITABLE() allows it.
 * Returns OK, or ENOMEM if the page table update fails.
 */
PRIVATE int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr)
{
	int rw;
	struct phys_block *pb = pr->ph;

	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(pb->length % VM_PAGE_SIZE));
	assert(!(pr->offset % VM_PAGE_SIZE));
	assert(pb->refcount > 0);

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = 0;

	if(pt_writemap(&vmp->vm_pt, vr->vaddr + pr->offset,
			pb->phys, pb->length, PTF_PRESENT | PTF_USER | rw,
#if SANITYCHECKS
			/* With sanity checks, 'written' tracks whether this
			 * mapping was made before; the first time need not
			 * pass WMF_OVERWRITE.
			 */
			!pr->written ? 0 :
#endif
			WMF_OVERWRITE) != OK) {
		printf("VM: map_writept: pt_writemap failed\n");
		return ENOMEM;
	}

#if SANITYCHECKS
	USE(pr, pr->written = 1;);
#endif

	return OK;
}
272 /*===========================================================================*
273 * region_find_slot *
274 *===========================================================================*/
/* Find a free range of 'length' bytes in the process' virtual address
 * space, within [minv, maxv>. maxv == 0 means 'map exactly at minv'.
 * On success the start address is returned and *prev (if non-NULL) is
 * set to the region preceding the hole (NULL if the hole precedes the
 * first region). On failure (vir_bytes) -1 is returned.
 */
PRIVATE vir_bytes region_find_slot(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length,
		struct vir_region **prev)
{
	struct vir_region *firstregion = vmp->vm_regions, *prevregion = NULL;
	vir_bytes startv;
	int foundflag = 0;

	SANITYCHECK(SCL_FUNCTIONS);

	/* We must be in paged mode to be able to do this. */
	assert(vm_paged);

	/* Length must be reasonable. */
	assert(length > 0);

	/* Special case: allow caller to set maxv to 0 meaning 'I want
	 * it to be mapped in right here.'
	 */
	if(maxv == 0) {
		maxv = minv + length;

		/* Sanity check. */
		if(maxv <= minv) {
			printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
				minv, length);
			map_printmap(vmp);
			return (vir_bytes) -1;
		}
	}

	/* Basic input sanity checks. */
	assert(!(length % VM_PAGE_SIZE));
	if(minv >= maxv) {
		printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
			minv, maxv, length);
	}
	assert(minv < maxv);
	assert(minv + length <= maxv);

/* Test one candidate hole, clipped to [minv, maxv>; record the first
 * one that is large enough and run 'foundcode'.
 */
#define FREEVRANGE(rangestart, rangeend, foundcode) {		\
	vir_bytes frstart = (rangestart), frend = (rangeend);	\
	frstart = MAX(frstart, minv);				\
	frend = MIN(frend, maxv);				\
	if(frend > frstart && (frend - frstart) >= length) {	\
		startv = frstart;				\
		foundflag = 1;					\
		foundcode;					\
	} }

	/* This is the free virtual address space before the first region. */
	FREEVRANGE(0, firstregion ? firstregion->vaddr : VM_DATATOP, ;);

	if(!foundflag) {
		/* Walk the gaps between consecutive regions. */
		struct vir_region *vr;
		for(vr = vmp->vm_regions; vr && !foundflag; vr = vr->next) {
			FREEVRANGE(vr->vaddr + vr->length,
				vr->next ? vr->next->vaddr : VM_DATATOP,
				prevregion = vr;);
		}
	}

	if(!foundflag) {
		printf("VM: region_find_slot: no 0x%lx bytes found for %d between 0x%lx and 0x%lx\n",
			length, vmp->vm_endpoint, minv, maxv);
		map_printmap(vmp);
		return (vir_bytes) -1;
	}

#if SANITYCHECKS
	if(prevregion) assert(prevregion->vaddr < startv);
#endif

	/* However we got it, startv must be in the requested range. */
	assert(startv >= minv);
	assert(startv < maxv);
	assert(startv + length <= maxv);

	if (prev)
		*prev = prevregion;
	return startv;
}
358 /*===========================================================================*
359 * map_page_region *
360 *===========================================================================*/
/* Create a new region of 'length' bytes for process 'vmp' at a free
 * virtual address in [minv, maxv> (maxv == 0: exactly at minv). If
 * 'what' is a physical address (not MAP_NONE) it is mapped immediately;
 * with MF_PREALLOC anonymous memory is allocated up front. Returns the
 * new region, already linked into the process, or NULL on failure.
 */
PUBLIC struct vir_region *map_page_region(vmp, minv, maxv, length,
	what, flags, mapflags)
struct vmproc *vmp;
vir_bytes minv;
vir_bytes maxv;
vir_bytes length;
vir_bytes what;
u32_t flags;
int mapflags;
{
	struct vir_region *prevregion = NULL, *newregion;
	vir_bytes startv;
	struct phys_region *ph;	/* NOTE(review): unused in this function. */
	physr_avl *phavl;

	assert(!(length % VM_PAGE_SIZE));

	SANITYCHECK(SCL_FUNCTIONS);

	/* Find a suitable hole in the address space. */
	startv = region_find_slot(vmp, minv, maxv, length, &prevregion);
	if (startv == (vir_bytes) -1)
		return NULL;

	/* Now we want a new region. */
	if(!SLABALLOC(newregion)) {
		printf("VM: map_page_region: allocating region failed\n");
		return NULL;
	}

	/* Fill in node details. */
	USE(newregion,
		newregion->vaddr = startv;
		newregion->length = length;
		newregion->flags = flags;
		newregion->tag = VRT_NONE;
		newregion->parent = vmp;);

	SLABALLOC(phavl);
	if(!phavl) {
		printf("VM: map_page_region: allocating phys avl failed\n");
		SLABFREE(newregion);
		return NULL;
	}
	USE(newregion, newregion->phys = phavl;);

	physr_init(newregion->phys);

	/* If we know what we're going to map to, map it right away. */
	if(what != MAP_NONE) {
		assert(!(what % VM_PAGE_SIZE));
		assert(!(startv % VM_PAGE_SIZE));
		assert(!(mapflags & MF_PREALLOC));
		if(map_new_physblock(vmp, newregion, 0, length,
			what, PAF_CLEAR, 0) != OK) {
			printf("VM: map_new_physblock failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	if((flags & VR_ANON) && (mapflags & MF_PREALLOC)) {
		/* Caller asked for the anonymous memory to be allocated
		 * and mapped writable right now.
		 */
		if(map_handle_memory(vmp, newregion, 0, length, 1) != OK) {
			printf("VM: map_page_region: prealloc failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	/* Link it. */
	if(prevregion) {
		assert(prevregion->vaddr < newregion->vaddr);
		USE(newregion, newregion->next = prevregion->next;);
		USE(prevregion, prevregion->next = newregion;);
	} else {
		USE(newregion, newregion->next = vmp->vm_regions;);
		vmp->vm_regions = newregion;
	}

#if SANITYCHECKS
	assert(startv == newregion->vaddr);
	if(newregion->next) {
		assert(newregion->vaddr < newregion->next->vaddr);
	}
#endif

	SANITYCHECK(SCL_FUNCTIONS);

	return newregion;
}
455 /*===========================================================================*
456 * pb_unreferenced *
457 *===========================================================================*/
/* Drop one reference to pr's phys_block: unlink pr from the block's
 * list of referencing phys_regions and decrement the refcount. When the
 * count reaches zero, anonymous memory is returned to the allocator and
 * the block is freed; otherwise the remaining references' page table
 * entries are refreshed, since writability can change when the refcount
 * drops (see WRITABLE()).
 */
PUBLIC void pb_unreferenced(struct vir_region *region, struct phys_region *pr)
{
	struct phys_block *pb;
	int remap = 0;	/* NOTE(review): unused in this function. */

	pb = pr->ph;
	assert(pb->refcount > 0);
	USE(pb, pb->refcount--;);
	assert(pb->refcount >= 0);

	/* Unlink pr from pb's singly-linked list of phys_regions. */
	if(pb->firstregion == pr) {
		USE(pb, pb->firstregion = pr->next_ph_list;);
	} else {
		struct phys_region *others;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
			if(others->next_ph_list == pr) {
				USE(others, others->next_ph_list = pr->next_ph_list;);
				break;
			}
		}

		assert(others); /* Otherwise, wasn't on the list. */
	}

	if(pb->refcount == 0) {
		assert(!pb->firstregion);
		if(region->flags & VR_ANON) {
			/* Anonymous memory is owned by VM: return it. */
			free_mem(ABS2CLICK(pb->phys),
				ABS2CLICK(pb->length));
		} else if(region->flags & VR_DIRECT) {
			; /* No action required. */
		} else {
			panic("strange phys flags");
		}
		SLABFREE(pb);
	} else {
		struct phys_region *others;
		int n = 0;

		/* Refresh page tables of the remaining references; some
		 * may now have become writable.
		 */
		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			if(WRITABLE(region, others->ph)) {
				if(map_ph_writept(others->parent->parent,
					others->parent, others) != OK) {
					printf("VM: map_ph_writept failed unexpectedly\n");
				}
			}
			n++;
		}
		assert(n == pb->refcount);
	}
}
514 PRIVATE struct phys_region *reset_physr_iter(struct vir_region *region,
515 physr_iter *iter, vir_bytes offset)
517 struct phys_region *ph;
519 physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
520 ph = physr_get_iter(iter);
521 assert(ph);
522 assert(ph->offset == offset);
524 return ph;
527 /*===========================================================================*
528 * map_subfree *
529 *===========================================================================*/
/* Free the first 'len' bytes of a region: phys_regions wholly below
 * 'len' are released; a phys_region straddling 'len' is (after cloning,
 * if shared) shrunk from the front so that it starts at 'len'.
 * Returns OK or ENOMEM.
 */
PRIVATE int map_subfree(struct vmproc *vmp,
	struct vir_region *region, vir_bytes len)
{
	struct phys_region *pr, *nextpr;	/* NOTE(review): nextpr unused. */
	physr_iter iter;

#if SANITYCHECKS
	{
	/* Verify the phys_region back-lists before touching anything. */
	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		struct phys_region *others;
		struct phys_block *pb;

		pb = pr->ph;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
		}
		physr_incr_iter(&iter);
	}
	}
#endif

	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		physr_incr_iter(&iter);
		if(pr->offset >= len)
			break;
		if(pr->offset + pr->ph->length <= len) {
			/* Wholly inside the freed range: release it. The
			 * iterator is restarted because the tree changed.
			 */
			pb_unreferenced(region, pr);
			physr_remove(region->phys, pr->offset);
			physr_start_iter_least(region->phys, &iter);
			SLABFREE(pr);
		} else {
			/* Straddles 'len': free only the leading part. */
			vir_bytes sublen;
			assert(len > pr->offset);
			assert(len < pr->offset + pr->ph->length);
			assert(pr->ph->refcount > 0);
			sublen = len - pr->offset;
			assert(!(sublen % VM_PAGE_SIZE));
			assert(sublen < pr->ph->length);
			if(pr->ph->refcount > 1) {
				/* Shared block: clone first so we can shrink
				 * a private copy.
				 */
				int r;
				if(!(pr = map_clone_ph_block(vmp, region,
					pr, &iter)))
					return ENOMEM;
			}
			assert(pr->ph->refcount == 1);
			if(!(region->flags & VR_DIRECT)) {
				free_mem(ABS2CLICK(pr->ph->phys), ABS2CLICK(sublen));
			}
			USE(pr, pr->offset += sublen;);
			USE(pr->ph,
				pr->ph->phys += sublen;
				pr->ph->length -= sublen;);
			assert(!(pr->offset % VM_PAGE_SIZE));
			assert(!(pr->ph->phys % VM_PAGE_SIZE));
			assert(!(pr->ph->length % VM_PAGE_SIZE));
		}
	}

	return OK;
}
596 /*===========================================================================*
597 * map_free *
598 *===========================================================================*/
599 PRIVATE int map_free(struct vmproc *vmp, struct vir_region *region)
601 int r;
603 if((r=map_subfree(vmp, region, region->length)) != OK) {
604 printf("%d\n", __LINE__);
605 return r;
608 USE(region,
609 SLABFREE(region->phys););
610 SLABFREE(region);
612 return OK;
615 /*========================================================================*
616 * map_free_proc *
617 *========================================================================*/
/* Release all regions of a process (e.g. on exit or exec). Always OK. */
PUBLIC int map_free_proc(vmp)
struct vmproc *vmp;
{
	struct vir_region *r, *nextr;

	SANITYCHECK(SCL_FUNCTIONS);

	for(r = vmp->vm_regions; r; r = nextr) {
		nextr = r->next;	/* r is freed below; save successor. */
		SANITYCHECK(SCL_DETAIL);
#if SANITYCHECKS
		nocheck++;	/* Suppress checks while the map is inconsistent. */
#endif
		map_free(vmp, r);
		vmp->vm_regions = nextr;	/* For sanity checks. */
#if SANITYCHECKS
		nocheck--;
#endif
		SANITYCHECK(SCL_DETAIL);
	}

	vmp->vm_regions = NULL;

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
646 /*===========================================================================*
647 * map_lookup *
648 *===========================================================================*/
649 PUBLIC struct vir_region *map_lookup(vmp, offset)
650 struct vmproc *vmp;
651 vir_bytes offset;
653 struct vir_region *r;
655 SANITYCHECK(SCL_FUNCTIONS);
657 if(!vmp->vm_regions)
658 panic("process has no regions: %d", vmp->vm_endpoint);
660 for(r = vmp->vm_regions; r; r = r->next) {
661 if(offset >= r->vaddr && offset < r->vaddr + r->length)
662 return r;
665 SANITYCHECK(SCL_FUNCTIONS);
667 return NULL;
670 PRIVATE u32_t vrallocflags(u32_t flags)
672 u32_t allocflags = 0;
674 if(flags & VR_PHYS64K)
675 allocflags |= PAF_ALIGN64K;
676 if(flags & VR_LOWER16MB)
677 allocflags |= PAF_LOWER16MB;
678 if(flags & VR_LOWER1MB)
679 allocflags |= PAF_LOWER1MB;
680 if(flags & VR_CONTIG)
681 allocflags |= PAF_CONTIG;
683 return allocflags;
686 /*===========================================================================*
687 * map_new_physblock *
688 *===========================================================================*/
/* Back the byte range [start_offset, start_offset+length> of a region
 * with physical memory: either freshly allocated (what_mem == MAP_NONE,
 * possibly in multiple chunks) or the caller-supplied physical range.
 * Creates phys_block/phys_region pairs and enters them into the page
 * table. On failure everything created here is undone. Returns OK or
 * an error code.
 */
PRIVATE int map_new_physblock(vmp, region, start_offset, length,
	what_mem, allocflags, written)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes start_offset;
vir_bytes length;
phys_bytes what_mem;
u32_t allocflags;
int written;
{
	struct memlist *memlist, given, *ml;
	int used_memlist, r;
	vir_bytes mapped = 0;
	vir_bytes offset = start_offset;

	SANITYCHECK(SCL_FUNCTIONS);

	assert(!(length % VM_PAGE_SIZE));

	/* A VR_CONTIG region must be populated in one full-size shot. */
	if((region->flags & VR_CONTIG) &&
		(start_offset > 0 || length < region->length)) {
		printf("VM: map_new_physblock: non-full allocation requested\n");
		return EFAULT;
	}

	/* Memory for new physical block. */
	if(what_mem == MAP_NONE) {
		allocflags |= vrallocflags(region->flags);

		if(!(memlist = alloc_mem_in_list(length, allocflags))) {
			printf("map_new_physblock: couldn't allocate\n");
			return ENOMEM;
		}
		used_memlist = 1;
	} else {
		/* Caller supplied the memory: wrap it in a one-entry
		 * list on the stack.
		 */
		given.phys = what_mem;
		given.length = length;
		given.next = NULL;
		memlist = &given;
		used_memlist = 0;
		assert(given.length);
	}

	r = OK;

	for(ml = memlist; ml; ml = ml->next) {
		struct phys_region *newphysr = NULL;
		struct phys_block *newpb = NULL;

		/* Allocate things necessary for this chunk of memory. */
		if(!SLABALLOC(newphysr) || !SLABALLOC(newpb)) {
			printf("map_new_physblock: no memory for the ph slabs\n");
			if(newphysr) SLABFREE(newphysr);
			if(newpb) SLABFREE(newpb);
			r = ENOMEM;
			break;
		}

		/* New physical block. */
		assert(!(ml->phys % VM_PAGE_SIZE));

		USE(newpb,
			newpb->phys = ml->phys;
			newpb->refcount = 1;
			newpb->length = ml->length;
			newpb->firstregion = newphysr;);

		/* New physical region. */
		USE(newphysr,
			newphysr->offset = offset;
			newphysr->ph = newpb;
			newphysr->parent = region;
			/* No other references to this block. */
			newphysr->next_ph_list = NULL;);
#if SANITYCHECKS
		USE(newphysr, newphysr->written = written;);
#endif

		/* Update pagetable. */
		if(map_ph_writept(vmp, region, newphysr) != OK) {
			printf("map_new_physblock: map_ph_writept failed\n");
			r = ENOMEM;
			break;
		}

		physr_insert(region->phys, newphysr);

		offset += ml->length;
		mapped += ml->length;
	}

	if(used_memlist) {
		if(r != OK) {
			offset = start_offset;
			/* Things did not go well. Undo everything. */
			for(ml = memlist; ml; ml = ml->next) {
				struct phys_region *physr;
				/* NOTE(review): offset is advanced before the
				 * lookup, so the chunk at start_offset itself
				 * is never searched for — verify whether that
				 * is intended.
				 */
				offset += ml->length;
				if((physr = physr_search(region->phys, offset,
					AVL_EQUAL))) {
					assert(physr->ph->refcount == 1);
					pb_unreferenced(region, physr);
					physr_remove(region->phys, physr->offset);
					SLABFREE(physr);
				}
			}
		} else assert(mapped == length);

		/* Always clean up the memlist itself, even if everything
		 * worked we're not using the memlist nodes any more. And
		 * the memory they reference is either freed above or in use.
		 */
		free_mem_list(memlist, 0);
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return r;
}
809 /*===========================================================================*
810 * map_clone_ph_block *
811 *===========================================================================*/
812 PRIVATE struct phys_region *map_clone_ph_block(vmp, region, ph, iter)
813 struct vmproc *vmp;
814 struct vir_region *region;
815 struct phys_region *ph;
816 physr_iter *iter;
818 vir_bytes offset, length;
819 struct memlist *ml;
820 u32_t allocflags;
821 phys_bytes physaddr;
822 struct phys_region *newpr;
823 int written = 0;
824 #if SANITYCHECKS
825 written = ph->written;
826 #endif
827 SANITYCHECK(SCL_FUNCTIONS);
829 /* Warning: this function will free the passed
830 * phys_region *ph and replace it (in the same offset)
831 * with one or more others! So both the pointer to it
832 * and any iterators over the phys_regions in the vir_region
833 * will be invalid on successful return. (Iterators over
834 * the vir_region could be invalid on unsuccessful return too.)
837 /* This function takes a physical block, copies its contents
838 * into newly allocated memory, and replaces the single physical
839 * block by one or more physical blocks with refcount 1 with the
840 * same contents as the original. In other words, a fragmentable
841 * version of map_copy_ph_block().
844 /* Remember where and how much. */
845 offset = ph->offset;
846 length = ph->ph->length;
847 physaddr = ph->ph->phys;
849 /* Now unlink the original physical block so we can replace
850 * it with new ones.
853 SANITYCHECK(SCL_DETAIL);
854 SLABSANE(ph);
855 SLABSANE(ph->ph);
856 assert(ph->ph->refcount > 1);
857 pb_unreferenced(region, ph);
858 assert(ph->ph->refcount >= 1);
859 physr_remove(region->phys, offset);
860 SLABFREE(ph);
862 SANITYCHECK(SCL_DETAIL);
864 /* Put new free memory in. */
865 allocflags = vrallocflags(region->flags);
866 assert(!(allocflags & PAF_CONTIG));
867 assert(!(allocflags & PAF_CLEAR));
869 if(map_new_physblock(vmp, region, offset, length,
870 MAP_NONE, allocflags, written) != OK) {
871 /* XXX original range now gone. */
872 free_mem_list(ml, 0);
873 printf("VM: map_clone_ph_block: map_new_physblock failed.\n");
874 return NULL;
877 /* Copy the block to the new memory.
878 * Can only fail if map_new_physblock didn't do what we asked.
880 if(copy_abs2region(physaddr, region, offset, length) != OK)
881 panic("copy_abs2region failed, no good reason for that");
883 newpr = physr_search(region->phys, offset, AVL_EQUAL);
884 assert(newpr);
885 assert(newpr->offset == offset);
887 if(iter) {
888 physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
889 assert(physr_get_iter(iter) == newpr);
892 SANITYCHECK(SCL_FUNCTIONS);
894 return newpr;
898 /*===========================================================================*
899 * map_pf *
900 *===========================================================================*/
/* Handle a page fault at 'offset' within an anonymous region: either
 * perform copy-on-write on an existing block or map in a fresh zeroed
 * page. Returns OK, EFAULT (write to a read-only non-COW page), or
 * ENOMEM.
 */
PUBLIC int map_pf(vmp, region, offset, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset;
int write;
{
	vir_bytes virpage;
	struct phys_region *ph;
	int r = OK;

	assert(offset >= 0);
	assert(offset < region->length);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));

	/* Round down to the start of the faulting page. */
	virpage = offset - offset % VM_PAGE_SIZE;

	SANITYCHECK(SCL_FUNCTIONS);

	if((ph = physr_search(region->phys, offset, AVL_LESS_EQUAL)) &&
		(ph->offset <= offset && offset < ph->offset + ph->ph->length)) {
		phys_bytes blockoffset = ph->offset;	/* NOTE(review): unused. */
		/* Pagefault in existing block. Do copy-on-write. */
		assert(write);
		assert(region->flags & VR_WRITABLE);
		assert(ph->ph->refcount > 0);

		if(WRITABLE(region, ph->ph)) {
			/* Already writable; just (re)write the PTE. */
			r = map_ph_writept(vmp, region, ph);
			if(r != OK)
				printf("map_ph_writept failed\n");
		} else {
			if(ph->ph->refcount > 0
				&& ph->ph->share_flag != PBSH_COW) {
				printf("VM: write RO mapped pages.\n");
				return EFAULT;
			} else {
				/* COW: replace with a private copy. */
				if(!map_clone_ph_block(vmp, region, ph, NULL))
					r = ENOMEM;
			}
		}
	} else {
		/* Pagefault in non-existing block. Map in new block. */
		if(map_new_physblock(vmp, region, virpage,
			VM_PAGE_SIZE, MAP_NONE, PAF_CLEAR, 0) != OK) {
			printf("map_new_physblock failed\n");
			r = ENOMEM;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(r != OK) {
		printf("VM: map_pf: failed (%d)\n", r);
		return r;
	}

#if SANITYCHECKS
	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+virpage,
		VM_PAGE_SIZE, write)) {
		panic("map_pf: pt_checkrange failed: %d", r);
	}
#endif

	return r;
}
969 /*===========================================================================*
970 * map_handle_memory *
971 *===========================================================================*/
/* Make the range [offset, offset+length> of an anonymous region fully
 * present (and, if 'write' is set, privately writable): holes get new
 * zeroed blocks, shared blocks are cloned for copy-on-write, and page
 * table entries are written. Returns OK, ENOMEM, an error from the page
 * table update, or EFAULT when nothing at all needed doing.
 */
PUBLIC int map_handle_memory(vmp, region, offset, length, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset, length;
int write;
{
	struct phys_region *physr, *nextphysr;
	int changes = 0;
	physr_iter iter;

/* Allocate a new physblock in the gap between phys_regions r1 and r2
 * (either may be NULL), clipped to [offset, offset+length>.
 */
#define FREE_RANGE_HERE(er1, er2) {					\
	struct phys_region *r1 = (er1), *r2 = (er2);			\
	vir_bytes start = offset, end = offset + length;		\
	if(r1) { 							\
		start = MAX(start, r1->offset + r1->ph->length); }	\
	if(r2) { 							\
		end = MIN(end, r2->offset); }				\
	if(start < end) {						\
		int r;							\
		SANITYCHECK(SCL_DETAIL);				\
		if(map_new_physblock(vmp, region, start,		\
			end-start, MAP_NONE, PAF_CLEAR, 0) != OK) {	\
			SANITYCHECK(SCL_DETAIL);			\
			return ENOMEM;					\
		}							\
		changes++;						\
	} }

	SANITYCHECK(SCL_FUNCTIONS);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));
	assert(!(offset % VM_PAGE_SIZE));
	assert(!(length % VM_PAGE_SIZE));
	assert(!write || (region->flags & VR_WRITABLE));

	/* Position at the phys_region at or before 'offset', or failing
	 * that, the first one after it.
	 */
	physr_start_iter(region->phys, &iter, offset, AVL_LESS_EQUAL);
	physr = physr_get_iter(&iter);

	if(!physr) {
		physr_start_iter(region->phys, &iter, offset, AVL_GREATER_EQUAL);
		physr = physr_get_iter(&iter);
	}

	/* Fill any gap before the first phys_region. */
	FREE_RANGE_HERE(NULL, physr);

	if(physr) {
		/* FREE_RANGE_HERE may have changed the tree; re-seat. */
		physr = reset_physr_iter(region, &iter, physr->offset);
		if(physr->offset + physr->ph->length <= offset) {
			/* Found block ends before 'offset': step to the
			 * next one and fill any gap before it, too.
			 */
			physr_incr_iter(&iter);
			physr = physr_get_iter(&iter);

			FREE_RANGE_HERE(NULL, physr);
			if(physr) {
				physr = reset_physr_iter(region, &iter,
					physr->offset);
			}
		}
	}

	while(physr) {
		int r;

		SANITYCHECK(SCL_DETAIL);

		if(write) {
			assert(physr->ph->refcount > 0);
			if(!WRITABLE(region, physr->ph)) {
				/* Shared block: clone for copy-on-write. */
				if(!(physr = map_clone_ph_block(vmp, region,
					physr, &iter))) {
					printf("VM: map_handle_memory: no copy\n");
					return ENOMEM;
				}
				changes++;
			} else {
				SANITYCHECK(SCL_DETAIL);
				if((r=map_ph_writept(vmp, region, physr)) != OK) {
					printf("VM: map_ph_writept failed\n");
					return r;
				}
				changes++;
				SANITYCHECK(SCL_DETAIL);
			}
		}

		SANITYCHECK(SCL_DETAIL);
		physr_incr_iter(&iter);
		nextphysr = physr_get_iter(&iter);
		/* Fill the gap between this block and the next. */
		FREE_RANGE_HERE(physr, nextphysr);
		SANITYCHECK(SCL_DETAIL);
		if(nextphysr) {
			if(nextphysr->offset >= offset + length)
				break;
			nextphysr = reset_physr_iter(region, &iter,
				nextphysr->offset);
		}
		physr = nextphysr;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(changes < 1) {
#if VERBOSE
		printf("region start at 0x%lx offset 0x%lx len 0x%lx write %d\n",
			region->vaddr, offset, length, write);
		printf("no changes in map_handle_memory\n");
#endif
		return EFAULT;
	}

#if SANITYCHECKS
	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset, length, write)) {
		printf("handle mem %s-", arch_map2str(vmp, region->vaddr+offset));
		printf("%s failed\n", arch_map2str(vmp, region->vaddr+offset+length));
		map_printregion(vmp, region);
		panic("checkrange failed");
	}
#endif

	return OK;
}
1095 #if SANITYCHECKS
1096 static int countregions(struct vir_region *vr)
1098 int n = 0;
1099 struct phys_region *ph;
1100 physr_iter iter;
1101 physr_start_iter_least(vr->phys, &iter);
1102 while((ph = physr_get_iter(&iter))) {
1103 n++;
1104 physr_incr_iter(&iter);
1106 return n;
1108 #endif
1110 /*===========================================================================*
1111 * map_copy_region *
1112 *===========================================================================*/
PRIVATE struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region *vr)
{
	/* map_copy_region creates a complete copy of the vir_region
	 * data structure, linking in the same phys_blocks directly,
	 * but all in limbo, i.e., the caller has to link the vir_region
	 * to a process. Therefore it doesn't increase the refcount in
	 * the phys_block; the caller has to do this once it's linked.
	 * The reason for this is to keep the sanity checks working
	 * within this function.
	 */
	struct vir_region *newvr;
	struct phys_region *ph;
	physr_iter iter;
	physr_avl *phavl;
#if SANITYCHECKS
	int cr;
	cr = countregions(vr);
#endif

	if(!SLABALLOC(newvr))
		return NULL;
	SLABALLOC(phavl);
	if(!phavl) {
		SLABFREE(newvr);
		return NULL;
	}
	/* Shallow-copy the region, then give it its own (empty) AVL. */
	USE(newvr,
		*newvr = *vr;
		newvr->next = NULL;
		newvr->phys = phavl;
	);
	physr_init(newvr->phys);

	/* Duplicate every phys_region, pointing at the SAME phys_blocks
	 * (refcounts intentionally untouched; see comment above).
	 */
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		struct phys_region *newph;
		if(!SLABALLOC(newph)) {
			/* Allocation failed: undo the partial copy. */
			map_free(vmp, newvr);
			return NULL;
		}
		USE(newph,
		newph->ph = ph->ph;
		newph->next_ph_list = NULL;
		newph->parent = newvr;
		newph->offset = ph->offset;);
#if SANITYCHECKS
		USE(newph, newph->written = 0;);
#endif
		physr_insert(newvr->phys, newph);
#if SANITYCHECKS
		assert(countregions(vr) == cr);
#endif
		physr_incr_iter(&iter);
	}

#if SANITYCHECKS
	assert(countregions(vr) == countregions(newvr));
#endif

	return newvr;
}
1175 /*===========================================================================*
1176 * copy_abs2region *
1177 *===========================================================================*/
/* Copy 'len' bytes from absolute physical address 'abs' into the
 * physical memory backing destregion at [offset, offset+len>, chunk by
 * chunk along the region's phys_regions. Every target block must be
 * present and private (refcount 1). Returns OK or EFAULT.
 */
PUBLIC int copy_abs2region(phys_bytes abs, struct vir_region *destregion,
	phys_bytes offset, phys_bytes len)
{
	assert(destregion);
	assert(destregion->phys);
	while(len > 0) {
		phys_bytes sublen, suboffset;
		struct phys_region *ph;
		assert(destregion);
		assert(destregion->phys);
		if(!(ph = physr_search(destregion->phys, offset, AVL_LESS_EQUAL))) {
			printf("VM: copy_abs2region: no phys region found (1).\n");
			return EFAULT;
		}
		assert(ph->offset <= offset);
		if(ph->offset+ph->ph->length <= offset) {
			printf("VM: copy_abs2region: no phys region found (2).\n");
			return EFAULT;
		}
		/* Clip this chunk to the end of the found block. */
		suboffset = offset - ph->offset;
		assert(suboffset < ph->ph->length);
		sublen = len;
		if(sublen > ph->ph->length - suboffset)
			sublen = ph->ph->length - suboffset;
		assert(suboffset + sublen <= ph->ph->length);
		if(ph->ph->refcount != 1) {
			/* NOTE(review): message text is misleading — the
			 * region WAS found, but is shared (refcount != 1).
			 */
			printf("VM: copy_abs2region: no phys region found (3).\n");
			return EFAULT;
		}

		if(sys_abscopy(abs, ph->ph->phys + suboffset, sublen) != OK) {
			printf("VM: copy_abs2region: abscopy failed.\n");
			return EFAULT;
		}
		abs += sublen;
		offset += sublen;
		len -= sublen;
	}

	return OK;
}
1221 /*=========================================================================*
1222 * map_writept *
1223 *=========================================================================*/
1224 PUBLIC int map_writept(struct vmproc *vmp)
1226 struct vir_region *vr;
1227 struct phys_region *ph;
1228 int r;
1230 for(vr = vmp->vm_regions; vr; vr = vr->next) {
1231 physr_iter iter;
1232 physr_start_iter_least(vr->phys, &iter);
1233 while((ph = physr_get_iter(&iter))) {
1234 physr_incr_iter(&iter);
1236 /* If this phys block is shared as SMAP, then do
1237 * not update the page table. */
1238 if(ph->ph->refcount > 1
1239 && ph->ph->share_flag == PBSH_SMAP) {
1240 continue;
1243 if((r=map_ph_writept(vmp, vr, ph)) != OK) {
1244 printf("VM: map_writept: failed\n");
1245 return r;
1250 return OK;
/*========================================================================*
 *				map_proc_copy			     	  *
 *========================================================================*/
PUBLIC int map_proc_copy(dst, src)
struct vmproc *dst;
struct vmproc *src;
{
/* Copy all regions of 'src' into 'dst' (fork). Each copied phys region
 * shares its phys block with the original; blocks become COW unless they
 * are already SMAP-shared, in which case they are cloned immediately.
 * Returns OK or ENOMEM (with 'dst' cleaned up on failure).
 */
	struct vir_region *vr, *prevvr = NULL;
	dst->vm_regions = NULL;

	SANITYCHECK(SCL_FUNCTIONS);

	for(vr = src->vm_regions; vr; vr = vr->next) {
		physr_iter iter_orig, iter_new;
		struct vir_region *newvr;
		struct phys_region *orig_ph, *new_ph;
		if(!(newvr = map_copy_region(dst, vr))) {
			/* Out of memory; drop what was copied so far. */
			map_free_proc(dst);
			return ENOMEM;
		}
		USE(newvr, newvr->parent = dst;);
		/* Append the copy to the tail of dst's region list. */
		if(prevvr) { USE(prevvr, prevvr->next = newvr;); }
		else { dst->vm_regions = newvr; }
		/* Walk original and copy in lockstep; map_copy_region()
		 * produced one new phys region per original one.
		 */
		physr_start_iter_least(vr->phys, &iter_orig);
		physr_start_iter_least(newvr->phys, &iter_new);
		while((orig_ph = physr_get_iter(&iter_orig))) {
			struct phys_block *pb;
			new_ph = physr_get_iter(&iter_new);
			/* Check two physregions both are nonnull,
			 * are different, and match physblocks.
			 */
			assert(new_ph);
			assert(orig_ph);
			assert(orig_ph != new_ph);
			pb = orig_ph->ph;
			assert(pb == new_ph->ph);

			/* Link in new physregion. */
			assert(!new_ph->next_ph_list);
			USE(new_ph, new_ph->next_ph_list = pb->firstregion;);
			USE(pb, pb->firstregion = new_ph;);

			/* Increase phys block refcount */
			assert(pb->refcount > 0);
			USE(pb, pb->refcount++;);
			assert(pb->refcount > 1);

			/* If the phys block has been shared as SMAP,
			 * do the regular copy. */
			if(pb->refcount > 2 && pb->share_flag == PBSH_SMAP) {
				map_clone_ph_block(dst, newvr,new_ph,
					&iter_new);
			} else {
				USE(pb, pb->share_flag = PBSH_COW;);
			}

			/* Get next new physregion */
			physr_incr_iter(&iter_orig);
			physr_incr_iter(&iter_new);
		}
		/* Both iterators must be exhausted together. */
		assert(!physr_get_iter(&iter_new));
		prevvr = newvr;
	}

	/* Re-write both page tables: COW blocks must now be read-only
	 * in src as well as dst.
	 */
	map_writept(src);
	map_writept(dst);

	SANITYCHECK(SCL_FUNCTIONS);
	return OK;
}
1325 /*========================================================================*
1326 * map_proc_kernel *
1327 *========================================================================*/
1328 PUBLIC struct vir_region *map_proc_kernel(struct vmproc *vmp)
1330 struct vir_region *vr;
1332 /* We assume these are the first regions to be mapped to
1333 * make the function a bit simpler (free all regions on error).
1335 assert(!vmp->vm_regions);
1336 assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
1337 assert(!(KERNEL_TEXT % VM_PAGE_SIZE));
1338 assert(!(KERNEL_TEXT_LEN % VM_PAGE_SIZE));
1339 assert(!(KERNEL_DATA % VM_PAGE_SIZE));
1340 assert(!(KERNEL_DATA_LEN % VM_PAGE_SIZE));
1342 if(!(vr = map_page_region(vmp, KERNEL_TEXT, 0, KERNEL_TEXT_LEN,
1343 KERNEL_TEXT, VR_DIRECT | VR_WRITABLE | VR_NOPF, 0)) ||
1344 !(vr = map_page_region(vmp, KERNEL_DATA, 0, KERNEL_DATA_LEN,
1345 KERNEL_DATA, VR_DIRECT | VR_WRITABLE | VR_NOPF, 0))) {
1346 map_free_proc(vmp);
1347 return NULL;
1350 return vr; /* Return pointer not useful, just non-NULL. */
1353 /*========================================================================*
1354 * map_region_extend *
1355 *========================================================================*/
1356 PUBLIC int map_region_extend(struct vmproc *vmp, struct vir_region *vr,
1357 vir_bytes delta)
1359 vir_bytes end;
1361 assert(vr);
1362 assert(vr->flags & VR_ANON);
1363 assert(!(delta % VM_PAGE_SIZE));
1365 if(!delta) return OK;
1366 end = vr->vaddr + vr->length;
1367 assert(end >= vr->vaddr);
1369 if(end + delta <= end) {
1370 printf("VM: strange delta 0x%lx\n", delta);
1371 return ENOMEM;
1374 if(!vr->next || end + delta <= vr->next->vaddr) {
1375 USE(vr, vr->length += delta;);
1376 return OK;
1379 map_printmap(vmp);
1381 return ENOMEM;
/*========================================================================*
 *				map_region_shrink	     	  	*
 *========================================================================*/
PUBLIC int map_region_shrink(struct vir_region *vr, vir_bytes delta)
{
/* Shrinking a region is currently a deliberate no-op; only the
 * arguments are validated. Always returns OK.
 */
	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));

#if 0
	printf("VM: ignoring region shrink\n");
#endif

	return OK;
}
1400 PUBLIC struct vir_region *map_region_lookup_tag(vmp, tag)
1401 struct vmproc *vmp;
1402 u32_t tag;
1404 struct vir_region *vr;
1406 for(vr = vmp->vm_regions; vr; vr = vr->next)
1407 if(vr->tag == tag)
1408 return vr;
1410 return NULL;
/* Set the identification tag of region 'vr'. */
PUBLIC void map_region_set_tag(struct vir_region *vr, u32_t tag)
{
	USE(vr, vr->tag = tag;);
}
/* Return the identification tag of region 'vr'. */
PUBLIC u32_t map_region_get_tag(struct vir_region *vr)
{
	return vr->tag;
}
/*========================================================================*
 *				map_unmap_region	     	  	*
 *========================================================================*/
PUBLIC int map_unmap_region(struct vmproc *vmp, struct vir_region *region,
	vir_bytes len)
{
/* Shrink the region by 'len' bytes, from the start. Unreference
 * memory it used to reference if any. If len equals the whole region
 * length, the region is unlinked and freed entirely.
 * Returns OK, EINVAL on bad arguments, or ENOMEM.
 */
	struct vir_region *r, *nextr, *prev = NULL;
	vir_bytes regionstart;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Find 'region' in the list, remembering its predecessor so we
	 * can unlink it.
	 */
	for(r = vmp->vm_regions; r; r = r->next) {
		if(r == region)
			break;

		prev = r;
	}

	SANITYCHECK(SCL_DETAIL);

	if(r == NULL)
		panic("map_unmap_region: region not found");

	if(len > r->length || (len % VM_PAGE_SIZE)) {
		printf("VM: bogus length 0x%lx\n", len);
		return EINVAL;
	}

	if(!(r->flags & (VR_ANON|VR_DIRECT))) {
		printf("VM: only unmap anonymous or direct memory\n");
		return EINVAL;
	}

	/* Remember the original base; the page table range starting here
	 * is wiped below regardless of whether the region survives.
	 */
	regionstart = r->vaddr;

	if(len == r->length) {
		/* Whole region disappears. Unlink and free it. */
		if(!prev) {
			vmp->vm_regions = r->next;
		} else {
			USE(prev, prev->next = r->next;);
		}
		map_free(vmp, r);
	} else {
		struct phys_region *pr;
		physr_iter iter;
		/* Region shrinks. First unreference its memory
		 * and then shrink the region.
		 */
		map_subfree(vmp, r, len);
		USE(r,
		r->vaddr += len;
		r->length -= len;);
		physr_start_iter_least(r->phys, &iter);

		/* vaddr has increased; to make all the phys_regions
		 * point to the same addresses, make them shrink by the
		 * same amount.
		 */
		while((pr = physr_get_iter(&iter))) {
			assert(pr->offset >= len);
			USE(pr, pr->offset -= len;);
			physr_incr_iter(&iter);
		}
	}

	SANITYCHECK(SCL_DETAIL);

	/* Remove the unmapped range from the page table. */
	if(pt_writemap(&vmp->vm_pt, regionstart,
	  MAP_NONE, len, 0, WMF_OVERWRITE) != OK) {
		printf("VM: map_unmap_region: pt_writemap failed\n");
		return ENOMEM;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
1505 /*========================================================================*
1506 * map_remap *
1507 *========================================================================*/
1508 PUBLIC int map_remap(struct vmproc *dvmp, vir_bytes da, size_t size,
1509 struct vir_region *region, vir_bytes *r)
1511 struct vir_region *vr, *prev;
1512 struct phys_region *ph;
1513 vir_bytes startv, dst_addr;
1514 physr_iter iter;
1516 SANITYCHECK(SCL_FUNCTIONS);
1518 assert(region->flags & VR_SHARED);
1520 /* da is handled differently */
1521 if (!da)
1522 dst_addr = dvmp->vm_stacktop;
1523 else
1524 dst_addr = da;
1525 dst_addr = arch_vir2map(dvmp, dst_addr);
1527 prev = NULL;
1528 /* round up to page size */
1529 assert(!(size % VM_PAGE_SIZE));
1530 startv = region_find_slot(dvmp, dst_addr, VM_DATATOP, size, &prev);
1531 if (startv == (vir_bytes) -1) {
1532 printf("map_remap: search 0x%x...\n", dst_addr);
1533 map_printmap(dvmp);
1534 return ENOMEM;
1536 /* when the user specifies the address, we cannot change it */
1537 if (da && (startv != dst_addr))
1538 return EINVAL;
1540 vr = map_copy_region(dvmp, region);
1541 if(!vr)
1542 return ENOMEM;
1544 USE(vr,
1545 vr->vaddr = startv;
1546 vr->length = size;
1547 vr->flags = region->flags;
1548 vr->tag = VRT_NONE;
1549 vr->parent = dvmp;);
1550 assert(vr->flags & VR_SHARED);
1552 if (prev) {
1553 USE(vr,
1554 vr->next = prev->next;);
1555 USE(prev, prev->next = vr;);
1556 } else {
1557 USE(vr,
1558 vr->next = dvmp->vm_regions;);
1559 dvmp->vm_regions = vr;
1562 physr_start_iter_least(vr->phys, &iter);
1563 while((ph = physr_get_iter(&iter))) {
1564 struct phys_block *pb = ph->ph;
1565 assert(!ph->next_ph_list);
1566 USE(ph, ph->next_ph_list = pb->firstregion;);
1567 USE(pb, pb->firstregion = ph;);
1568 USE(pb, pb->refcount++;);
1569 if(map_ph_writept(dvmp, vr, ph) != OK) {
1570 panic("map_remap: map_ph_writept failed");
1572 physr_incr_iter(&iter);
1575 *r = startv;
1577 SANITYCHECK(SCL_FUNCTIONS);
1579 return OK;
1582 /*========================================================================*
1583 * map_get_phys *
1584 *========================================================================*/
1585 PUBLIC int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
1587 struct vir_region *vr;
1588 struct phys_region *ph;
1589 physr_iter iter;
1591 if (!(vr = map_lookup(vmp, addr)) ||
1592 (vr->vaddr != addr))
1593 return EINVAL;
1595 if (!(vr->flags & VR_SHARED))
1596 return EINVAL;
1598 physr_start_iter_least(vr->phys, &iter);
1599 ph = physr_get_iter(&iter);
1601 assert(ph);
1602 assert(ph->ph);
1603 if (r)
1604 *r = ph->ph->phys;
1606 return OK;
1609 /*========================================================================*
1610 * map_get_ref *
1611 *========================================================================*/
1612 PUBLIC int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
1614 struct vir_region *vr;
1615 struct phys_region *ph;
1616 physr_iter iter;
1618 if (!(vr = map_lookup(vmp, addr)) ||
1619 (vr->vaddr != addr))
1620 return EINVAL;
1622 if (!(vr->flags & VR_SHARED))
1623 return EINVAL;
1625 physr_start_iter_least(vr->phys, &iter);
1626 ph = physr_get_iter(&iter);
1628 assert(ph);
1629 assert(ph->ph);
1630 if (cnt)
1631 *cnt = ph->ph->refcount;
1633 return OK;
1636 /*========================================================================*
1637 * get_usage_info *
1638 *========================================================================*/
1639 PUBLIC void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
1641 struct vir_region *vr;
1642 physr_iter iter;
1643 struct phys_region *ph;
1644 vir_bytes len;
1646 memset(vui, 0, sizeof(*vui));
1648 for(vr = vmp->vm_regions; vr; vr = vr->next) {
1649 physr_start_iter_least(vr->phys, &iter);
1650 while((ph = physr_get_iter(&iter))) {
1651 len = ph->ph->length;
1653 /* All present pages are counted towards the total. */
1654 vui->vui_total += len;
1656 if (ph->ph->refcount > 1) {
1657 /* Any page with a refcount > 1 is common. */
1658 vui->vui_common += len;
1660 /* Any common, non-COW page is shared. */
1661 if (vr->flags & VR_SHARED ||
1662 ph->ph->share_flag == PBSH_SMAP)
1663 vui->vui_shared += len;
1665 physr_incr_iter(&iter);
1670 /*===========================================================================*
1671 * get_region_info *
1672 *===========================================================================*/
1673 PUBLIC int get_region_info(struct vmproc *vmp, struct vm_region_info *vri,
1674 int max, vir_bytes *nextp)
1676 struct vir_region *vr;
1677 vir_bytes next;
1678 int count;
1680 next = *nextp;
1682 if (!max) return 0;
1684 for(vr = vmp->vm_regions; vr; vr = vr->next)
1685 if (vr->vaddr >= next) break;
1687 if (!vr) return 0;
1689 for(count = 0; vr && count < max; vr = vr->next, count++, vri++) {
1690 vri->vri_addr = arch_map2info(vmp, vr->vaddr, &vri->vri_seg,
1691 &vri->vri_prot);
1692 vri->vri_length = vr->length;
1694 /* "AND" the provided protection with per-page protection. */
1695 if (!(vr->flags & VR_WRITABLE))
1696 vri->vri_prot &= ~PROT_WRITE;
1698 vri->vri_flags = (vr->flags & VR_SHARED) ? MAP_SHARED : 0;
1700 next = vr->vaddr + vr->length;
1703 *nextp = next;
1704 return count;
1707 /*========================================================================*
1708 * regionprintstats *
1709 *========================================================================*/
1710 PUBLIC void printregionstats(struct vmproc *vmp)
1712 struct vir_region *vr;
1713 struct phys_region *pr;
1714 physr_iter iter;
1715 vir_bytes used = 0, weighted = 0;
1717 for(vr = vmp->vm_regions; vr; vr = vr->next) {
1718 if(vr->flags & VR_DIRECT)
1719 continue;
1720 physr_start_iter_least(vr->phys, &iter);
1721 while((pr = physr_get_iter(&iter))) {
1722 physr_incr_iter(&iter);
1723 used += pr->ph->length;
1724 weighted += pr->ph->length / pr->ph->refcount;
1728 printf("%6dkB %6dkB\n", used/1024, weighted/1024);
1730 return;
/*===========================================================================*
 *				do_map_memory				*
 *===========================================================================*/
PRIVATE int do_map_memory(struct vmproc *vms, struct vmproc *vmd,
	struct vir_region *vrs, struct vir_region *vrd,
	vir_bytes offset_s, vir_bytes offset_d,
	vir_bytes length, int flag)
{
/* Share 'length' bytes of region 'vrs' (process 'vms', starting at
 * offset_s) with region 'vrd' (process 'vmd', starting at offset_d).
 * The range edges are assumed to be pre-split onto phys block
 * boundaries (see clean_phys_regions()). Returns OK or ENOMEM.
 */
	struct phys_region *prs;
	struct phys_region *newphysr;
	struct phys_block *pb;
	physr_iter iter;
	u32_t pt_flag = PTF_PRESENT | PTF_USER;
	vir_bytes end;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Search for the first phys region in the source process. */
	physr_start_iter(vrs->phys, &iter, offset_s, AVL_EQUAL);
	prs = physr_get_iter(&iter);
	if(!prs)
		panic("do_map_memory: no aligned phys region: %d", 0);

	/* flag: 0 -> read-only
	 *       1 -> writable
	 *      -1 -> share as COW, so read-only
	 */
	if(flag > 0)
		pt_flag |= PTF_WRITE;

	/* Map phys blocks in the source process to the destination process. */
	end = offset_d + length;
	while((prs = physr_get_iter(&iter)) && offset_d < end) {
		/* If a SMAP share was requested but the phys block has already
		 * been shared as COW, copy the block for the source phys region
		 * first.
		 */
		pb = prs->ph;
		if(flag >= 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_COW) {
			if(!(prs = map_clone_ph_block(vms, vrs, prs, &iter)))
				return ENOMEM;
			pb = prs->ph;
		}

		/* Allocate a new phys region. */
		if(!SLABALLOC(newphysr))
			return ENOMEM;

		/* Set and link the new phys region to the block. */
		newphysr->ph = pb;
		newphysr->offset = offset_d;
		newphysr->parent = vrd;
		newphysr->next_ph_list = pb->firstregion;
		pb->firstregion = newphysr;
		physr_insert(newphysr->parent->phys, newphysr);
		pb->refcount++;

		/* If a COW share was requested but the phys block has already
		 * been shared as SMAP, give up on COW and copy the block for
		 * the destination phys region now.
		 */
		if(flag < 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_SMAP) {
			if(!(newphysr = map_clone_ph_block(vmd, vrd,
				newphysr, NULL))) {
				return ENOMEM;
			}
		}
		else {
			/* See if this is a COW share or SMAP share. */
			if(flag < 0) {			/* COW share */
				pb->share_flag = PBSH_COW;
				/* Update the page table for the src process. */
				pt_writemap(&vms->vm_pt, offset_s + vrs->vaddr,
					pb->phys, pb->length,
					pt_flag, WMF_OVERWRITE);
			}
			else {			/* SMAP share */
				pb->share_flag = PBSH_SMAP;
			}
			/* Update the page table for the destination process. */
			pt_writemap(&vmd->vm_pt, offset_d + vrd->vaddr,
				pb->phys, pb->length, pt_flag, WMF_OVERWRITE);
		}

		/* Advance both offsets by the size of this phys block. */
		physr_incr_iter(&iter);
		offset_d += pb->length;
		offset_s += pb->length;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*===========================================================================*
 *				unmap_memory				*
 *===========================================================================*/
PUBLIC int unmap_memory(endpoint_t sour, endpoint_t dest,
	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
{
/* Undo an SMAP share of 'length' bytes at 'virt_d' in process 'dest':
 * every shared phys block in the range is cloned into private memory
 * for the destination. Returns OK or ENOMEM.
 * The source arguments are unused; the destination mapping alone
 * carries enough information.
 */
	struct vmproc *vmd;
	struct vir_region *vrd;
	struct phys_region *pr;
	struct phys_block *pb;
	physr_iter iter;
	vir_bytes off, end;
	int p;

	/* Use information on the destination process to unmap. */
	if(vm_isokendpt(dest, &p) != OK)
		panic("unmap_memory: bad endpoint: %d", dest);
	vmd = &vmproc[p];

	vrd = map_lookup(vmd, virt_d);
	assert(vrd);

	/* Search for the first phys region in the destination process. */
	off = virt_d - vrd->vaddr;
	physr_start_iter(vrd->phys, &iter, off, AVL_EQUAL);
	pr = physr_get_iter(&iter);
	if(!pr)
		panic("unmap_memory: no aligned phys region: %d", 0);

	/* Copy the phys block now rather than doing COW. */
	end = off + length;
	while((pr = physr_get_iter(&iter)) && off < end) {
		pb = pr->ph;
		assert(pb->refcount > 1);
		assert(pb->share_flag == PBSH_SMAP);

		if(!(pr = map_clone_ph_block(vmd, vrd, pr, &iter)))
			return ENOMEM;

		/* Advance by the old block's length; the clone has the
		 * same length as the original.
		 */
		physr_incr_iter(&iter);
		off += pb->length;
	}

	return OK;
}
/*===========================================================================*
 *				split_phys				*
 *===========================================================================*/
PRIVATE int split_phys(struct phys_region *pr, vir_bytes point)
{
	struct phys_region *newpr, *q, *prev;
	struct phys_block *newpb;
	struct phys_block *pb = pr->ph;
/* Split the phys region into 2 parts by @point. */

	/* Nothing to do if 'point' does not fall strictly inside pr. */
	if(pr->offset >= point || pr->offset + pb->length <= point)
		return OK;
	if(!SLABALLOC(newpb))
		return ENOMEM;

	/* Split phys block. */
	*newpb = *pb;
	pb->length = point - pr->offset;
	newpb->length -= pb->length;
	newpb->phys += pb->length;

	/* Split phys regions in a list. Every region referencing the old
	 * block gets a twin referencing the new (upper) block.
	 */
	for(q = pb->firstregion; q; q = q->next_ph_list) {
		/* NOTE(review): failing here leaks 'newpb' and leaves the
		 * block half-split — confirm callers tolerate this.
		 */
		if(!SLABALLOC(newpr))
			return ENOMEM;

		*newpr = *q;
		newpr->ph = newpb;
		newpr->offset += pb->length;

		/* Link to the vir region's phys region list. */
		physr_insert(newpr->parent->phys, newpr);

		/* Link to the next_ph_list. */
		if(q == pb->firstregion) {
			newpb->firstregion = newpr;
			prev = newpr;
		} else {
			prev->next_ph_list = newpr;
			prev = newpr;
		}
	}
	/* NOTE(review): assumes pb->firstregion was non-NULL (refcounted
	 * blocks always have at least one region), otherwise 'prev' is
	 * used uninitialized here — confirm.
	 */
	prev->next_ph_list = NULL;

	return OK;
}
/*===========================================================================*
 *				clean_phys_regions			*
 *===========================================================================*/
PRIVATE void clean_phys_regions(struct vir_region *region,
	vir_bytes offset, vir_bytes length)
{
/* Consider @offset as the start address and @offset+length as the end address.
 * If there are phys regions crossing the start address or the end address,
 * split them into 2 parts.
 *
 * We assume that the phys regions are listed in order and don't overlap.
 */
	struct phys_region *pr;
	physr_iter iter;

	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		/* If this phys region crosses the start address, split it. */
		if(pr->offset < offset
			&& pr->offset + pr->ph->length > offset) {
			split_phys(pr, offset);
			/* The tree changed; restart the iterator. */
			physr_start_iter_least(region->phys, &iter);
		}
		/* If this phys region crosses the end address, split it. */
		else if(pr->offset < offset + length
			&& pr->offset + pr->ph->length > offset + length) {
			split_phys(pr, offset + length);
			/* The tree changed; restart the iterator. */
			physr_start_iter_least(region->phys, &iter);
		}
		else {
			physr_incr_iter(&iter);
		}
	}
}
1957 /*===========================================================================*
1958 * rm_phys_regions *
1959 *===========================================================================*/
1960 PRIVATE void rm_phys_regions(struct vir_region *region,
1961 vir_bytes begin, vir_bytes length)
1963 /* Remove all phys regions between @begin and @begin+length.
1965 * Don't update the page table, because we will update it at map_memory()
1966 * later.
1968 struct phys_region *pr;
1969 physr_iter iter;
1971 physr_start_iter(region->phys, &iter, begin, AVL_GREATER_EQUAL);
1972 while((pr = physr_get_iter(&iter)) && pr->offset < begin + length) {
1973 pb_unreferenced(region, pr);
1974 physr_remove(region->phys, pr->offset);
1975 physr_start_iter(region->phys, &iter, begin,
1976 AVL_GREATER_EQUAL);
1977 SLABFREE(pr);
1981 /*===========================================================================*
1982 * map_memory *
1983 *===========================================================================*/
1984 PUBLIC int map_memory(endpoint_t sour, endpoint_t dest,
1985 vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
1987 /* This is the entry point. This function will be called by handle_memory() when
1988 * VM recieves a map-memory request.
1990 struct vmproc *vms, *vmd;
1991 struct vir_region *vrs, *vrd;
1992 physr_iter iterd;
1993 vir_bytes offset_s, offset_d;
1994 int p;
1995 int r;
1997 if(vm_isokendpt(sour, &p) != OK)
1998 panic("map_memory: bad endpoint: %d", sour);
1999 vms = &vmproc[p];
2000 if(vm_isokendpt(dest, &p) != OK)
2001 panic("map_memory: bad endpoint: %d", dest);
2002 vmd = &vmproc[p];
2004 vrs = map_lookup(vms, virt_s);
2005 assert(vrs);
2006 vrd = map_lookup(vmd, virt_d);
2007 assert(vrd);
2009 /* Linear address -> offset from start of vir region. */
2010 offset_s = virt_s - vrs->vaddr;
2011 offset_d = virt_d - vrd->vaddr;
2013 /* Make sure that the range in the source process has been mapped
2014 * to physical memory.
2016 map_handle_memory(vms, vrs, offset_s, length, 0);
2018 /* Prepare work. */
2019 clean_phys_regions(vrs, offset_s, length);
2020 clean_phys_regions(vrd, offset_d, length);
2021 rm_phys_regions(vrd, offset_d, length);
2023 /* Map memory. */
2024 r = do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, flag);
2026 return r;
2029 /*========================================================================*
2030 * map_lookup_phys *
2031 *========================================================================*/
2032 phys_bytes
2033 map_lookup_phys(struct vmproc *vmp, u32_t tag)
2035 struct vir_region *vr;
2036 struct phys_region *pr;
2037 physr_iter iter;
2039 if(!(vr = map_region_lookup_tag(vmp, tag))) {
2040 printf("VM: request for phys of missing region\n");
2041 return MAP_NONE;
2044 physr_start_iter_least(vr->phys, &iter);
2046 if(!(pr = physr_get_iter(&iter))) {
2047 printf("VM: request for phys of unmapped region\n");
2048 return MAP_NONE;
2051 if(pr->offset != 0 || pr->ph->length != vr->length) {
2052 printf("VM: request for phys of partially mapped region\n");
2053 return MAP_NONE;
2056 return pr->ph->phys;