5 #include <minix/callnr.h>
6 #include <minix/type.h>
7 #include <minix/config.h>
8 #include <minix/const.h>
9 #include <minix/sysutil.h>
10 #include <minix/syslib.h>
11 #include <minix/debug.h>
12 #include <minix/bitmap.h>
28 #include "sanitycheck.h"
32 /* Should a physblock be mapped writable? */
/* NOTE(review): extraction artifact — the original file's line numbers
 * ("32".."35") are fused into the text of this chunk; the macro body itself
 * is intact.  A physical block may be mapped writable when the owning
 * region is direct-mapped or shared, or when the region is writable and
 * this block has exactly one reference (no copy-on-write needed).
 */
33 #define WRITABLE(r, pb) \
34 (((r)->flags & (VR_DIRECT | VR_SHARED)) || \
35 (((r)->flags & VR_WRITABLE) && (pb)->refcount == 1))
37 FORWARD
_PROTOTYPE(int map_new_physblock
, (struct vmproc
*vmp
,
38 struct vir_region
*region
, vir_bytes offset
, vir_bytes length
,
39 phys_bytes what
, u32_t allocflags
, int written
));
41 FORWARD
_PROTOTYPE(int map_ph_writept
, (struct vmproc
*vmp
, struct vir_region
*vr
,
42 struct phys_region
*pr
));
44 FORWARD
_PROTOTYPE(struct vir_region
*map_copy_region
, (struct vmproc
*vmp
, struct vir_region
*vr
));
46 FORWARD
_PROTOTYPE(struct phys_region
*map_clone_ph_block
, (struct vmproc
*vmp
,
47 struct vir_region
*region
, struct phys_region
*ph
, physr_iter
*iter
));
/* map_name: return a static, human-readable "<type>, <tag>" description of a
 * vir_region, used by the printing/debug helpers below.
 * NOTE(review): this span is extraction-damaged — the switch/if bodies that
 * select 'typename' and 'tag' strings (original lines ~54-82) were dropped,
 * and declarations of 'typename'/'tag' are not visible.  Only fragments
 * remain; restore from the original source before compiling.
 * Returns a pointer to a static buffer — not reentrant.
 */
49 PRIVATE
char *map_name(struct vir_region
*vr
)
51 static char name
[100];
53 int type
= vr
->flags
& (VR_ANON
|VR_DIRECT
);
56 typename
= "anonymous";
62 panic("unknown mapping type: %d", type
);
79 tag
= "unknown tag value";
83 sprintf(name
, "%s, %s", typename
, tag
);
/* map_printregion: dump one region of a process for debugging — its name,
 * virtual range/length, then every phys_region in its AVL tree with the
 * block's refcount, physical address and length.
 * NOTE(review): extraction-damaged — the 'physr_iter iter;' declaration and
 * several braces are missing from this view; restore before compiling.
 */
88 PUBLIC
void map_printregion(struct vmproc
*vmp
, struct vir_region
*vr
)
91 struct phys_region
*ph
;
92 printf("map_printmap: map_name: %s\n", map_name(vr
));
93 printf("\t%s (len 0x%lx, %dkB), %s\n",
94 arch_map2str(vmp
, vr
->vaddr
), vr
->length
,
95 vr
->length
/1024, map_name(vr
));
96 printf("\t\tphysblocks:\n");
97 physr_start_iter_least(vr
->phys
, &iter
);
98 while((ph
= physr_get_iter(&iter
))) {
99 printf("\t\t@ %s (refs %d): phys 0x%lx len 0x%lx\n",
100 arch_map2str(vmp
, vr
->vaddr
+ ph
->offset
),
101 ph
->ph
->refcount
, ph
->ph
->phys
, ph
->ph
->length
);
102 physr_incr_iter(&iter
);
/* map_printmap: print all memory regions of process 'vmp' by walking its
 * singly-linked vm_regions list and calling map_printregion() on each.
 * NOTE(review): extraction-damaged — parameter declaration line
 * ('struct vmproc *vmp;', K&R style) and braces are missing from this view.
 */
106 /*===========================================================================*
108 *===========================================================================*/
109 PUBLIC
void map_printmap(vmp
)
112 struct vir_region
*vr
;
114 printf("memory regions in process %d:\n", vmp
->vm_endpoint
);
115 for(vr
= vmp
->vm_regions
; vr
; vr
= vr
->next
) {
116 map_printregion(vmp
, vr
);
124 /*===========================================================================*
125 * map_sanitycheck_pt *
126 *===========================================================================*/
127 PRIVATE
int map_sanitycheck_pt(struct vmproc
*vmp
,
128 struct vir_region
*vr
, struct phys_region
*pr
)
130 struct phys_block
*pb
= pr
->ph
;
134 if(!(vmp
->vm_flags
& VMF_HASPT
))
142 r
= pt_writemap(&vmp
->vm_pt
, vr
->vaddr
+ pr
->offset
,
143 pb
->phys
, pb
->length
, PTF_PRESENT
| PTF_USER
| rw
, WMF_VERIFY
);
146 printf("proc %d phys_region 0x%lx sanity check failed\n",
147 vmp
->vm_endpoint
, pr
->offset
);
148 map_printregion(vmp
, vr
);
154 /*===========================================================================*
156 *===========================================================================*/
157 PUBLIC
void map_sanitycheck(char *file
, int line
)
161 /* Macro for looping over all physical blocks of all regions of
164 #define ALLREGIONS(regioncode, physcode) \
165 for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) { \
166 struct vir_region *vr; \
167 if(!(vmp->vm_flags & VMF_INUSE)) \
169 for(vr = vmp->vm_regions; vr; vr = vr->next) { \
171 struct phys_region *pr; \
173 physr_start_iter_least(vr->phys, &iter); \
174 while((pr = physr_get_iter(&iter))) { \
176 physr_incr_iter(&iter); \
181 #define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
182 /* Basic pointers check. */
183 ALLREGIONS(MYSLABSANE(vr
),MYSLABSANE(pr
); MYSLABSANE(pr
->ph
);MYSLABSANE(pr
->parent
));
184 ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr
->parent
== vr
););
186 /* Do counting for consistency check. */
187 ALLREGIONS(;,USE(pr
->ph
, pr
->ph
->seencount
= 0;););
188 ALLREGIONS(;,USE(pr
->ph
, pr
->ph
->seencount
++;);
189 if(pr
->ph
->seencount
== 1) {
190 MYASSERT(usedpages_add(pr
->ph
->phys
,
191 pr
->ph
->length
) == OK
);
195 /* Do consistency check. */
196 ALLREGIONS(if(vr
->next
) {
197 MYASSERT(vr
->vaddr
< vr
->next
->vaddr
);
198 MYASSERT(vr
->vaddr
+ vr
->length
<= vr
->next
->vaddr
);
200 MYASSERT(!(vr
->vaddr
% VM_PAGE_SIZE
));,
201 if(pr
->ph
->refcount
!= pr
->ph
->seencount
) {
203 printf("ph in vr 0x%lx: 0x%lx-0x%lx refcount %d "
204 "but seencount %lu\n",
206 pr
->offset
+ pr
->ph
->length
,
207 pr
->ph
->refcount
, pr
->ph
->seencount
);
211 struct phys_region
*others
;
212 if(pr
->ph
->refcount
> 0) {
213 MYASSERT(pr
->ph
->firstregion
);
214 if(pr
->ph
->refcount
== 1) {
215 MYASSERT(pr
->ph
->firstregion
== pr
);
218 MYASSERT(!pr
->ph
->firstregion
);
220 for(others
= pr
->ph
->firstregion
; others
;
221 others
= others
->next_ph_list
) {
223 MYASSERT(others
->ph
== pr
->ph
);
226 MYASSERT(pr
->ph
->refcount
== n_others
);
228 MYASSERT(pr
->ph
->refcount
== pr
->ph
->seencount
);
229 MYASSERT(!(pr
->offset
% VM_PAGE_SIZE
));
230 MYASSERT(!(pr
->ph
->length
% VM_PAGE_SIZE
)););
231 ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp
, vr
, pr
) == OK
));
/* map_ph_writept: enter one phys_region's mapping into the process's page
 * table at vr->vaddr + pr->offset, marking it present/user and writable
 * when WRITABLE(vr, pb) holds.  On success marks the phys_region as
 * 'written' (page table is up to date).
 * NOTE(review): extraction-damaged — the declaration/computation of 'rw'
 * (original lines ~241-254: rw = PTF_WRITE when writable, else 0) and the
 * return statements were dropped from this view; restore before compiling.
 */
236 /*=========================================================================*
238 *=========================================================================*/
239 PRIVATE
int map_ph_writept(struct vmproc
*vmp
, struct vir_region
*vr
,
240 struct phys_region
*pr
)
243 struct phys_block
*pb
= pr
->ph
;
/* All page-table mappings operate on page-aligned, page-multiple extents. */
245 assert(!(vr
->vaddr
% VM_PAGE_SIZE
));
246 assert(!(pb
->length
% VM_PAGE_SIZE
));
247 assert(!(pr
->offset
% VM_PAGE_SIZE
));
248 assert(pb
->refcount
> 0);
255 if(pt_writemap(&vmp
->vm_pt
, vr
->vaddr
+ pr
->offset
,
256 pb
->phys
, pb
->length
, PTF_PRESENT
| PTF_USER
| rw
,
260 WMF_OVERWRITE
) != OK
) {
261 printf("VM: map_writept: pt_writemap failed\n");
/* Record that this mapping has been written to the page table. */
266 USE(pr
, pr
->written
= 1;);
272 /*===========================================================================*
274 *===========================================================================*/
275 PRIVATE vir_bytes
region_find_slot(struct vmproc
*vmp
,
276 vir_bytes minv
, vir_bytes maxv
, vir_bytes length
,
277 struct vir_region
**prev
)
279 struct vir_region
*firstregion
= vmp
->vm_regions
, *prevregion
= NULL
;
283 SANITYCHECK(SCL_FUNCTIONS
);
285 /* We must be in paged mode to be able to do this. */
288 /* Length must be reasonable. */
291 /* Special case: allow caller to set maxv to 0 meaning 'I want
292 * it to be mapped in right here.'
295 maxv
= minv
+ length
;
299 printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
302 return (vir_bytes
) -1;
306 /* Basic input sanity checks. */
307 assert(!(length
% VM_PAGE_SIZE
));
309 printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
313 assert(minv
+ length
<= maxv
);
315 #define FREEVRANGE(rangestart, rangeend, foundcode) { \
316 vir_bytes frstart = (rangestart), frend = (rangeend); \
317 frstart = MAX(frstart, minv); \
318 frend = MIN(frend, maxv); \
319 if(frend > frstart && (frend - frstart) >= length) { \
325 /* This is the free virtual address space before the first region. */
326 FREEVRANGE(0, firstregion
? firstregion
->vaddr
: VM_DATATOP
, ;);
329 struct vir_region
*vr
;
330 for(vr
= vmp
->vm_regions
; vr
&& !foundflag
; vr
= vr
->next
) {
331 FREEVRANGE(vr
->vaddr
+ vr
->length
,
332 vr
->next
? vr
->next
->vaddr
: VM_DATATOP
,
338 printf("VM: region_find_slot: no 0x%lx bytes found for %d between 0x%lx and 0x%lx\n",
339 length
, vmp
->vm_endpoint
, minv
, maxv
);
341 return (vir_bytes
) -1;
345 if(prevregion
) assert(prevregion
->vaddr
< startv
);
348 /* However we got it, startv must be in the requested range. */
349 assert(startv
>= minv
);
350 assert(startv
< maxv
);
351 assert(startv
+ length
<= maxv
);
358 /*===========================================================================*
360 *===========================================================================*/
361 PUBLIC
struct vir_region
*map_page_region(vmp
, minv
, maxv
, length
,
362 what
, flags
, mapflags
)
371 struct vir_region
*prevregion
= NULL
, *newregion
;
373 struct phys_region
*ph
;
376 assert(!(length
% VM_PAGE_SIZE
));
378 SANITYCHECK(SCL_FUNCTIONS
);
380 startv
= region_find_slot(vmp
, minv
, maxv
, length
, &prevregion
);
381 if (startv
== (vir_bytes
) -1)
384 /* Now we want a new region. */
385 if(!SLABALLOC(newregion
)) {
386 printf("VM: map_page_region: allocating region failed\n");
390 /* Fill in node details. */
392 newregion
->vaddr
= startv
;
393 newregion
->length
= length
;
394 newregion
->flags
= flags
;
395 newregion
->tag
= VRT_NONE
;
396 newregion
->parent
= vmp
;);
400 printf("VM: map_page_region: allocating phys avl failed\n");
404 USE(newregion
, newregion
->phys
= phavl
;);
406 physr_init(newregion
->phys
);
408 /* If we know what we're going to map to, map it right away. */
409 if(what
!= MAP_NONE
) {
410 assert(!(what
% VM_PAGE_SIZE
));
411 assert(!(startv
% VM_PAGE_SIZE
));
412 assert(!(mapflags
& MF_PREALLOC
));
413 if(map_new_physblock(vmp
, newregion
, 0, length
,
414 what
, PAF_CLEAR
, 0) != OK
) {
415 printf("VM: map_new_physblock failed\n");
417 SLABFREE(newregion
->phys
););
423 if((flags
& VR_ANON
) && (mapflags
& MF_PREALLOC
)) {
424 if(map_handle_memory(vmp
, newregion
, 0, length
, 1) != OK
) {
425 printf("VM: map_page_region: prealloc failed\n");
427 SLABFREE(newregion
->phys
););
435 assert(prevregion
->vaddr
< newregion
->vaddr
);
436 USE(newregion
, newregion
->next
= prevregion
->next
;);
437 USE(prevregion
, prevregion
->next
= newregion
;);
439 USE(newregion
, newregion
->next
= vmp
->vm_regions
;);
440 vmp
->vm_regions
= newregion
;
444 assert(startv
== newregion
->vaddr
);
445 if(newregion
->next
) {
446 assert(newregion
->vaddr
< newregion
->next
->vaddr
);
450 SANITYCHECK(SCL_FUNCTIONS
);
455 /*===========================================================================*
457 *===========================================================================*/
458 PUBLIC
void pb_unreferenced(struct vir_region
*region
, struct phys_region
*pr
)
460 struct phys_block
*pb
;
464 assert(pb
->refcount
> 0);
465 USE(pb
, pb
->refcount
--;);
466 assert(pb
->refcount
>= 0);
468 if(pb
->firstregion
== pr
) {
469 USE(pb
, pb
->firstregion
= pr
->next_ph_list
;);
471 struct phys_region
*others
;
473 for(others
= pb
->firstregion
; others
;
474 others
= others
->next_ph_list
) {
475 assert(others
->ph
== pb
);
476 if(others
->next_ph_list
== pr
) {
477 USE(others
, others
->next_ph_list
= pr
->next_ph_list
;);
482 assert(others
); /* Otherwise, wasn't on the list. */
485 if(pb
->refcount
== 0) {
486 assert(!pb
->firstregion
);
487 if(region
->flags
& VR_ANON
) {
488 free_mem(ABS2CLICK(pb
->phys
),
489 ABS2CLICK(pb
->length
));
490 } else if(region
->flags
& VR_DIRECT
) {
491 ; /* No action required. */
493 panic("strange phys flags");
497 struct phys_region
*others
;
500 for(others
= pb
->firstregion
; others
;
501 others
= others
->next_ph_list
) {
502 if(WRITABLE(region
, others
->ph
)) {
503 if(map_ph_writept(others
->parent
->parent
,
504 others
->parent
, others
) != OK
) {
505 printf("VM: map_ph_writept failed unexpectedly\n");
510 assert(n
== pb
->refcount
);
514 PRIVATE
struct phys_region
*reset_physr_iter(struct vir_region
*region
,
515 physr_iter
*iter
, vir_bytes offset
)
517 struct phys_region
*ph
;
519 physr_start_iter(region
->phys
, iter
, offset
, AVL_EQUAL
);
520 ph
= physr_get_iter(iter
);
522 assert(ph
->offset
== offset
);
527 /*===========================================================================*
529 *===========================================================================*/
530 PRIVATE
int map_subfree(struct vmproc
*vmp
,
531 struct vir_region
*region
, vir_bytes len
)
533 struct phys_region
*pr
, *nextpr
;
539 physr_start_iter_least(region
->phys
, &iter
);
540 while((pr
= physr_get_iter(&iter
))) {
541 struct phys_region
*others
;
542 struct phys_block
*pb
;
546 for(others
= pb
->firstregion
; others
;
547 others
= others
->next_ph_list
) {
548 assert(others
->ph
== pb
);
550 physr_incr_iter(&iter
);
555 physr_start_iter_least(region
->phys
, &iter
);
556 while((pr
= physr_get_iter(&iter
))) {
557 physr_incr_iter(&iter
);
558 if(pr
->offset
>= len
)
560 if(pr
->offset
+ pr
->ph
->length
<= len
) {
561 pb_unreferenced(region
, pr
);
562 physr_remove(region
->phys
, pr
->offset
);
563 physr_start_iter_least(region
->phys
, &iter
);
567 assert(len
> pr
->offset
);
568 assert(len
< pr
->offset
+ pr
->ph
->length
);
569 assert(pr
->ph
->refcount
> 0);
570 sublen
= len
- pr
->offset
;
571 assert(!(sublen
% VM_PAGE_SIZE
));
572 assert(sublen
< pr
->ph
->length
);
573 if(pr
->ph
->refcount
> 1) {
575 if(!(pr
= map_clone_ph_block(vmp
, region
,
579 assert(pr
->ph
->refcount
== 1);
580 if(!(region
->flags
& VR_DIRECT
)) {
581 free_mem(ABS2CLICK(pr
->ph
->phys
), ABS2CLICK(sublen
));
583 USE(pr
, pr
->offset
+= sublen
;);
585 pr
->ph
->phys
+= sublen
;
586 pr
->ph
->length
-= sublen
;);
587 assert(!(pr
->offset
% VM_PAGE_SIZE
));
588 assert(!(pr
->ph
->phys
% VM_PAGE_SIZE
));
589 assert(!(pr
->ph
->length
% VM_PAGE_SIZE
));
/* map_free: release all memory referenced by 'region' (via map_subfree over
 * its whole length), then free its phys AVL tree and the region slab object.
 * NOTE(review): extraction-damaged — declaration of 'r', the error return,
 * the USE(...) wrapper around SLABFREE and the final SLABFREE(region) were
 * dropped from this view; restore before compiling.
 */
596 /*===========================================================================*
598 *===========================================================================*/
599 PRIVATE
int map_free(struct vmproc
*vmp
, struct vir_region
*region
)
603 if((r
=map_subfree(vmp
, region
, region
->length
)) != OK
) {
604 printf("%d\n", __LINE__
);
609 SLABFREE(region
->phys
);); 
/* map_free_proc: free every region of process 'vmp', leaving vm_regions
 * NULL.  The list head is advanced to 'nextr' before each free so that the
 * SANITYCHECK hooks never see a half-freed region on the list.
 * NOTE(review): extraction-damaged — K&R parameter declaration, the
 * map_free(vmp, r) call and several braces are missing from this view.
 */
615 /*========================================================================*
617 *========================================================================*/
618 PUBLIC
int map_free_proc(vmp
)
621 struct vir_region
*r
, *nextr
;
623 SANITYCHECK(SCL_FUNCTIONS
);
625 for(r
= vmp
->vm_regions
; r
; r
= nextr
) {
627 SANITYCHECK(SCL_DETAIL
);
632 vmp
->vm_regions
= nextr
; /* For sanity checks. */
636 SANITYCHECK(SCL_DETAIL
);
639 vmp
->vm_regions
= NULL
;
641 SANITYCHECK(SCL_FUNCTIONS
);
/* map_lookup: find the region of 'vmp' containing virtual address 'offset'
 * (vaddr <= offset < vaddr + length), or fall through to a NULL-ish return
 * when no region matches.  Panics if the process has no regions at all.
 * NOTE(review): extraction-damaged — K&R parameter declarations, the
 * 'return r;' inside the loop and the final return are missing from this
 * view; restore before compiling.
 */
646 /*===========================================================================*
648 *===========================================================================*/
649 PUBLIC
struct vir_region
*map_lookup(vmp
, offset
)
653 struct vir_region
*r
;
655 SANITYCHECK(SCL_FUNCTIONS
);
658 panic("process has no regions: %d", vmp
->vm_endpoint
);
660 for(r
= vmp
->vm_regions
; r
; r
= r
->next
) {
661 if(offset
>= r
->vaddr
&& offset
< r
->vaddr
+ r
->length
)
665 SANITYCHECK(SCL_FUNCTIONS
);
670 PRIVATE u32_t
vrallocflags(u32_t flags
)
672 u32_t allocflags
= 0;
674 if(flags
& VR_PHYS64K
)
675 allocflags
|= PAF_ALIGN64K
;
676 if(flags
& VR_LOWER16MB
)
677 allocflags
|= PAF_LOWER16MB
;
678 if(flags
& VR_LOWER1MB
)
679 allocflags
|= PAF_LOWER1MB
;
680 if(flags
& VR_CONTIG
)
681 allocflags
|= PAF_CONTIG
;
686 /*===========================================================================*
687 * map_new_physblock *
688 *===========================================================================*/
689 PRIVATE
int map_new_physblock(vmp
, region
, start_offset
, length
,
690 what_mem
, allocflags
, written
)
692 struct vir_region
*region
;
693 vir_bytes start_offset
;
699 struct memlist
*memlist
, given
, *ml
;
701 vir_bytes mapped
= 0;
702 vir_bytes offset
= start_offset
;
704 SANITYCHECK(SCL_FUNCTIONS
);
706 assert(!(length
% VM_PAGE_SIZE
));
708 if((region
->flags
& VR_CONTIG
) &&
709 (start_offset
> 0 || length
< region
->length
)) {
710 printf("VM: map_new_physblock: non-full allocation requested\n");
714 /* Memory for new physical block. */
715 if(what_mem
== MAP_NONE
) {
716 allocflags
|= vrallocflags(region
->flags
);
718 if(!(memlist
= alloc_mem_in_list(length
, allocflags
))) {
719 printf("map_new_physblock: couldn't allocate\n");
724 given
.phys
= what_mem
;
725 given
.length
= length
;
729 assert(given
.length
);
734 for(ml
= memlist
; ml
; ml
= ml
->next
) {
735 struct phys_region
*newphysr
= NULL
;
736 struct phys_block
*newpb
= NULL
;
738 /* Allocate things necessary for this chunk of memory. */
739 if(!SLABALLOC(newphysr
) || !SLABALLOC(newpb
)) {
740 printf("map_new_physblock: no memory for the ph slabs\n");
741 if(newphysr
) SLABFREE(newphysr
);
742 if(newpb
) SLABFREE(newpb
);
747 /* New physical block. */
748 assert(!(ml
->phys
% VM_PAGE_SIZE
));
751 newpb
->phys
= ml
->phys
;
753 newpb
->length
= ml
->length
;
754 newpb
->firstregion
= newphysr
;);
756 /* New physical region. */
758 newphysr
->offset
= offset
;
759 newphysr
->ph
= newpb
;
760 newphysr
->parent
= region
;
761 /* No other references to this block. */
762 newphysr
->next_ph_list
= NULL
;);
764 USE(newphysr
, newphysr
->written
= written
;);
767 /* Update pagetable. */
768 if(map_ph_writept(vmp
, region
, newphysr
) != OK
) {
769 printf("map_new_physblock: map_ph_writept failed\n");
774 physr_insert(region
->phys
, newphysr
);
776 offset
+= ml
->length
;
777 mapped
+= ml
->length
;
782 offset
= start_offset
;
783 /* Things did not go well. Undo everything. */
784 for(ml
= memlist
; ml
; ml
= ml
->next
) {
785 struct phys_region
*physr
;
786 offset
+= ml
->length
;
787 if((physr
= physr_search(region
->phys
, offset
,
789 assert(physr
->ph
->refcount
== 1);
790 pb_unreferenced(region
, physr
);
791 physr_remove(region
->phys
, physr
->offset
);
795 } else assert(mapped
== length
);
797 /* Always clean up the memlist itself, even if everything
798 * worked we're not using the memlist nodes any more. And
799 * the memory they reference is either freed above or in use.
801 free_mem_list(memlist
, 0);
804 SANITYCHECK(SCL_FUNCTIONS
);
809 /*===========================================================================*
810 * map_clone_ph_block *
811 *===========================================================================*/
812 PRIVATE
struct phys_region
*map_clone_ph_block(vmp
, region
, ph
, iter
)
814 struct vir_region
*region
;
815 struct phys_region
*ph
;
818 vir_bytes offset
, length
;
822 struct phys_region
*newpr
;
825 written
= ph
->written
;
827 SANITYCHECK(SCL_FUNCTIONS
);
829 /* Warning: this function will free the passed
830 * phys_region *ph and replace it (in the same offset)
831 * with one or more others! So both the pointer to it
832 * and any iterators over the phys_regions in the vir_region
833 * will be invalid on successful return. (Iterators over
834 * the vir_region could be invalid on unsuccessful return too.)
837 /* This function takes a physical block, copies its contents
838 * into newly allocated memory, and replaces the single physical
839 * block by one or more physical blocks with refcount 1 with the
840 * same contents as the original. In other words, a fragmentable
841 * version of map_copy_ph_block().
844 /* Remember where and how much. */
846 length
= ph
->ph
->length
;
847 physaddr
= ph
->ph
->phys
;
849 /* Now unlink the original physical block so we can replace
853 SANITYCHECK(SCL_DETAIL
);
856 assert(ph
->ph
->refcount
> 1);
857 pb_unreferenced(region
, ph
);
858 assert(ph
->ph
->refcount
>= 1);
859 physr_remove(region
->phys
, offset
);
862 SANITYCHECK(SCL_DETAIL
);
864 /* Put new free memory in. */
865 allocflags
= vrallocflags(region
->flags
);
866 assert(!(allocflags
& PAF_CONTIG
));
867 assert(!(allocflags
& PAF_CLEAR
));
869 if(map_new_physblock(vmp
, region
, offset
, length
,
870 MAP_NONE
, allocflags
, written
) != OK
) {
871 /* XXX original range now gone. */
872 free_mem_list(ml
, 0);
873 printf("VM: map_clone_ph_block: map_new_physblock failed.\n");
877 /* Copy the block to the new memory.
878 * Can only fail if map_new_physblock didn't do what we asked.
880 if(copy_abs2region(physaddr
, region
, offset
, length
) != OK
)
881 panic("copy_abs2region failed, no good reason for that");
883 newpr
= physr_search(region
->phys
, offset
, AVL_EQUAL
);
885 assert(newpr
->offset
== offset
);
888 physr_start_iter(region
->phys
, iter
, offset
, AVL_EQUAL
);
889 assert(physr_get_iter(iter
) == newpr
);
892 SANITYCHECK(SCL_FUNCTIONS
);
898 /*===========================================================================*
900 *===========================================================================*/
901 PUBLIC
int map_pf(vmp
, region
, offset
, write
)
903 struct vir_region
*region
;
908 struct phys_region
*ph
;
912 assert(offset
< region
->length
);
914 assert(region
->flags
& VR_ANON
);
915 assert(!(region
->vaddr
% VM_PAGE_SIZE
));
917 virpage
= offset
- offset
% VM_PAGE_SIZE
;
919 SANITYCHECK(SCL_FUNCTIONS
);
921 if((ph
= physr_search(region
->phys
, offset
, AVL_LESS_EQUAL
)) &&
922 (ph
->offset
<= offset
&& offset
< ph
->offset
+ ph
->ph
->length
)) {
923 phys_bytes blockoffset
= ph
->offset
;
924 /* Pagefault in existing block. Do copy-on-write. */
926 assert(region
->flags
& VR_WRITABLE
);
927 assert(ph
->ph
->refcount
> 0);
929 if(WRITABLE(region
, ph
->ph
)) {
930 r
= map_ph_writept(vmp
, region
, ph
);
932 printf("map_ph_writept failed\n");
934 if(ph
->ph
->refcount
> 0
935 && ph
->ph
->share_flag
!= PBSH_COW
) {
936 printf("VM: write RO mapped pages.\n");
939 if(!map_clone_ph_block(vmp
, region
, ph
, NULL
))
944 /* Pagefault in non-existing block. Map in new block. */
945 if(map_new_physblock(vmp
, region
, virpage
,
946 VM_PAGE_SIZE
, MAP_NONE
, PAF_CLEAR
, 0) != OK
) {
947 printf("map_new_physblock failed\n");
952 SANITYCHECK(SCL_FUNCTIONS
);
955 printf("VM: map_pf: failed (%d)\n", r
);
960 if(OK
!= pt_checkrange(&vmp
->vm_pt
, region
->vaddr
+virpage
,
961 VM_PAGE_SIZE
, write
)) {
962 panic("map_pf: pt_checkrange failed: %d", r
);
969 /*===========================================================================*
970 * map_handle_memory *
971 *===========================================================================*/
972 PUBLIC
int map_handle_memory(vmp
, region
, offset
, length
, write
)
974 struct vir_region
*region
;
975 vir_bytes offset
, length
;
978 struct phys_region
*physr
, *nextphysr
;
982 #define FREE_RANGE_HERE(er1, er2) { \
983 struct phys_region *r1 = (er1), *r2 = (er2); \
984 vir_bytes start = offset, end = offset + length; \
986 start = MAX(start, r1->offset + r1->ph->length); } \
988 end = MIN(end, r2->offset); } \
991 SANITYCHECK(SCL_DETAIL); \
992 if(map_new_physblock(vmp, region, start, \
993 end-start, MAP_NONE, PAF_CLEAR, 0) != OK) { \
994 SANITYCHECK(SCL_DETAIL); \
1001 SANITYCHECK(SCL_FUNCTIONS
);
1003 assert(region
->flags
& VR_ANON
);
1004 assert(!(region
->vaddr
% VM_PAGE_SIZE
));
1005 assert(!(offset
% VM_PAGE_SIZE
));
1006 assert(!(length
% VM_PAGE_SIZE
));
1007 assert(!write
|| (region
->flags
& VR_WRITABLE
));
1009 physr_start_iter(region
->phys
, &iter
, offset
, AVL_LESS_EQUAL
);
1010 physr
= physr_get_iter(&iter
);
1013 physr_start_iter(region
->phys
, &iter
, offset
, AVL_GREATER_EQUAL
);
1014 physr
= physr_get_iter(&iter
);
1017 FREE_RANGE_HERE(NULL
, physr
);
1020 physr
= reset_physr_iter(region
, &iter
, physr
->offset
);
1021 if(physr
->offset
+ physr
->ph
->length
<= offset
) {
1022 physr_incr_iter(&iter
);
1023 physr
= physr_get_iter(&iter
);
1025 FREE_RANGE_HERE(NULL
, physr
);
1027 physr
= reset_physr_iter(region
, &iter
,
1036 SANITYCHECK(SCL_DETAIL
);
1039 assert(physr
->ph
->refcount
> 0);
1040 if(!WRITABLE(region
, physr
->ph
)) {
1041 if(!(physr
= map_clone_ph_block(vmp
, region
,
1043 printf("VM: map_handle_memory: no copy\n");
1048 SANITYCHECK(SCL_DETAIL
);
1049 if((r
=map_ph_writept(vmp
, region
, physr
)) != OK
) {
1050 printf("VM: map_ph_writept failed\n");
1054 SANITYCHECK(SCL_DETAIL
);
1058 SANITYCHECK(SCL_DETAIL
);
1059 physr_incr_iter(&iter
);
1060 nextphysr
= physr_get_iter(&iter
);
1061 FREE_RANGE_HERE(physr
, nextphysr
);
1062 SANITYCHECK(SCL_DETAIL
);
1064 if(nextphysr
->offset
>= offset
+ length
)
1066 nextphysr
= reset_physr_iter(region
, &iter
,
1072 SANITYCHECK(SCL_FUNCTIONS
);
1076 printf("region start at 0x%lx offset 0x%lx len 0x%lx write %d\n",
1077 region
->vaddr
, offset
, length
, write
);
1078 printf("no changes in map_handle_memory\n");
1084 if(OK
!= pt_checkrange(&vmp
->vm_pt
, region
->vaddr
+offset
, length
, write
)) {
1085 printf("handle mem %s-", arch_map2str(vmp
, region
->vaddr
+offset
));
1086 printf("%s failed\n", arch_map2str(vmp
, region
->vaddr
+offset
+length
));
1087 map_printregion(vmp
, region
);
1088 panic("checkrange failed");
1096 static int countregions(struct vir_region
*vr
)
1099 struct phys_region
*ph
;
1101 physr_start_iter_least(vr
->phys
, &iter
);
1102 while((ph
= physr_get_iter(&iter
))) {
1104 physr_incr_iter(&iter
);
1110 /*===========================================================================*
1112 *===========================================================================*/
1113 PRIVATE
struct vir_region
*map_copy_region(struct vmproc
*vmp
, struct vir_region
*vr
)
1115 /* map_copy_region creates a complete copy of the vir_region
1116 * data structure, linking in the same phys_blocks directly,
1117 * but all in limbo, i.e., the caller has to link the vir_region
1118 * to a process. Therefore it doesn't increase the refcount in
1119 * the phys_block; the caller has to do this once it's linked.
1120 * The reason for this is to keep the sanity checks working
1121 * within this function.
1123 struct vir_region
*newvr
;
1124 struct phys_region
*ph
;
1129 cr
= countregions(vr
);
1132 if(!SLABALLOC(newvr
))
1142 newvr
->phys
= phavl
;
1144 physr_init(newvr
->phys
);
1146 physr_start_iter_least(vr
->phys
, &iter
);
1147 while((ph
= physr_get_iter(&iter
))) {
1148 struct phys_region
*newph
;
1149 if(!SLABALLOC(newph
)) {
1150 map_free(vmp
, newvr
);
1155 newph
->next_ph_list
= NULL
;
1156 newph
->parent
= newvr
;
1157 newph
->offset
= ph
->offset
;);
1159 USE(newph
, newph
->written
= 0;);
1161 physr_insert(newvr
->phys
, newph
);
1163 assert(countregions(vr
) == cr
);
1165 physr_incr_iter(&iter
);
1169 assert(countregions(vr
) == countregions(newvr
));
1175 /*===========================================================================*
1177 *===========================================================================*/
1178 PUBLIC
int copy_abs2region(phys_bytes abs
, struct vir_region
*destregion
,
1179 phys_bytes offset
, phys_bytes len
)
1183 assert(destregion
->phys
);
1185 phys_bytes sublen
, suboffset
;
1186 struct phys_region
*ph
;
1188 assert(destregion
->phys
);
1189 if(!(ph
= physr_search(destregion
->phys
, offset
, AVL_LESS_EQUAL
))) {
1190 printf("VM: copy_abs2region: no phys region found (1).\n");
1193 assert(ph
->offset
<= offset
);
1194 if(ph
->offset
+ph
->ph
->length
<= offset
) {
1195 printf("VM: copy_abs2region: no phys region found (2).\n");
1198 suboffset
= offset
- ph
->offset
;
1199 assert(suboffset
< ph
->ph
->length
);
1201 if(sublen
> ph
->ph
->length
- suboffset
)
1202 sublen
= ph
->ph
->length
- suboffset
;
1203 assert(suboffset
+ sublen
<= ph
->ph
->length
);
1204 if(ph
->ph
->refcount
!= 1) {
1205 printf("VM: copy_abs2region: no phys region found (3).\n");
1209 if(sys_abscopy(abs
, ph
->ph
->phys
+ suboffset
, sublen
) != OK
) {
1210 printf("VM: copy_abs2region: abscopy failed.\n");
/* map_writept: (re)write page-table entries for every phys_region of every
 * region of 'vmp', except blocks shared as SMAP (those mappings are managed
 * by the sharing mechanism and must not be overwritten here).
 * NOTE(review): extraction-damaged — declarations of 'r' and 'iter', the
 * 'continue' for the SMAP case, error/OK returns and closing braces are
 * missing from this view; restore before compiling.
 */
1221 /*=========================================================================*
1223 *=========================================================================*/
1224 PUBLIC
int map_writept(struct vmproc
*vmp
)
1226 struct vir_region
*vr
;
1227 struct phys_region
*ph
;
1230 for(vr
= vmp
->vm_regions
; vr
; vr
= vr
->next
) {
1232 physr_start_iter_least(vr
->phys
, &iter
);
1233 while((ph
= physr_get_iter(&iter
))) {
1234 physr_incr_iter(&iter
);
1236 /* If this phys block is shared as SMAP, then do
1237 * not update the page table. */
1238 if(ph
->ph
->refcount
> 1
1239 && ph
->ph
->share_flag
== PBSH_SMAP
) {
1243 if((r
=map_ph_writept(vmp
, vr
, ph
)) != OK
) {
1244 printf("VM: map_writept: failed\n");
1253 /*========================================================================*
1255 *========================================================================*/
1256 PUBLIC
int map_proc_copy(dst
, src
)
1260 struct vir_region
*vr
, *prevvr
= NULL
;
1261 dst
->vm_regions
= NULL
;
1263 SANITYCHECK(SCL_FUNCTIONS
);
1266 for(vr
= src
->vm_regions
; vr
; vr
= vr
->next
) {
1267 physr_iter iter_orig
, iter_new
;
1268 struct vir_region
*newvr
;
1269 struct phys_region
*orig_ph
, *new_ph
;
1270 if(!(newvr
= map_copy_region(dst
, vr
))) {
1274 USE(newvr
, newvr
->parent
= dst
;);
1275 if(prevvr
) { USE(prevvr
, prevvr
->next
= newvr
;); }
1276 else { dst
->vm_regions
= newvr
; }
1277 physr_start_iter_least(vr
->phys
, &iter_orig
);
1278 physr_start_iter_least(newvr
->phys
, &iter_new
);
1279 while((orig_ph
= physr_get_iter(&iter_orig
))) {
1280 struct phys_block
*pb
;
1281 new_ph
= physr_get_iter(&iter_new
);
1282 /* Check two physregions both are nonnull,
1283 * are different, and match physblocks.
1287 assert(orig_ph
!= new_ph
);
1289 assert(pb
== new_ph
->ph
);
1291 /* Link in new physregion. */
1292 assert(!new_ph
->next_ph_list
);
1293 USE(new_ph
, new_ph
->next_ph_list
= pb
->firstregion
;);
1294 USE(pb
, pb
->firstregion
= new_ph
;);
1296 /* Increase phys block refcount */
1297 assert(pb
->refcount
> 0);
1298 USE(pb
, pb
->refcount
++;);
1299 assert(pb
->refcount
> 1);
1301 /* If the phys block has been shared as SMAP,
1302 * do the regular copy. */
1303 if(pb
->refcount
> 2 && pb
->share_flag
== PBSH_SMAP
) {
1304 map_clone_ph_block(dst
, newvr
,new_ph
,
1307 USE(pb
, pb
->share_flag
= PBSH_COW
;);
1310 /* Get next new physregion */
1311 physr_incr_iter(&iter_orig
);
1312 physr_incr_iter(&iter_new
);
1314 assert(!physr_get_iter(&iter_new
));
1321 SANITYCHECK(SCL_FUNCTIONS
);
1325 /*========================================================================*
1327 *========================================================================*/
1328 PUBLIC
struct vir_region
*map_proc_kernel(struct vmproc
*vmp
)
1330 struct vir_region
*vr
;
1332 /* We assume these are the first regions to be mapped to
1333 * make the function a bit simpler (free all regions on error).
1335 assert(!vmp
->vm_regions
);
1336 assert(vmproc
[VMP_SYSTEM
].vm_flags
& VMF_INUSE
);
1337 assert(!(KERNEL_TEXT
% VM_PAGE_SIZE
));
1338 assert(!(KERNEL_TEXT_LEN
% VM_PAGE_SIZE
));
1339 assert(!(KERNEL_DATA
% VM_PAGE_SIZE
));
1340 assert(!(KERNEL_DATA_LEN
% VM_PAGE_SIZE
));
1342 if(!(vr
= map_page_region(vmp
, KERNEL_TEXT
, 0, KERNEL_TEXT_LEN
,
1343 KERNEL_TEXT
, VR_DIRECT
| VR_WRITABLE
| VR_NOPF
, 0)) ||
1344 !(vr
= map_page_region(vmp
, KERNEL_DATA
, 0, KERNEL_DATA_LEN
,
1345 KERNEL_DATA
, VR_DIRECT
| VR_WRITABLE
| VR_NOPF
, 0))) {
1350 return vr
; /* Return pointer not useful, just non-NULL. */
/* map_region_extend: grow anonymous region 'vr' by 'delta' bytes (page
 * multiple).  Rejects overflowing deltas (end + delta wraps) and only
 * extends in place when the gap to the next region is large enough.
 * NOTE(review): extraction-damaged — the 'vir_bytes delta' parameter, the
 * declaration of 'end', the returns (OK on extend, ENOMEM otherwise) and
 * braces are missing from this view; restore before compiling.
 */
1353 /*========================================================================*
1354 * map_region_extend *
1355 *========================================================================*/
1356 PUBLIC
int map_region_extend(struct vmproc
*vmp
, struct vir_region
*vr
,
1362 assert(vr
->flags
& VR_ANON
);
1363 assert(!(delta
% VM_PAGE_SIZE
));
1365 if(!delta
) return OK
;
1366 end
= vr
->vaddr
+ vr
->length
;
1367 assert(end
>= vr
->vaddr
);
/* Guard against address-space wraparound. */
1369 if(end
+ delta
<= end
) {
1370 printf("VM: strange delta 0x%lx\n", delta
);
/* In-place growth is only possible if there is no next region, or the
 * extended end still precedes the next region's start. */
1374 if(!vr
->next
|| end
+ delta
<= vr
->next
->vaddr
) {
1375 USE(vr
, vr
->length
+= delta
;);
/* map_region_shrink: intended to shrink an anonymous region by 'delta'
 * (page multiple) bytes; the visible implementation only validates its
 * arguments and logs that the shrink is ignored.
 * NOTE(review): extraction-damaged — braces and the return statement are
 * missing from this view; restore before compiling.
 */
1384 /*========================================================================*
1385 * map_region_shrink *
1386 *========================================================================*/
1387 PUBLIC
int map_region_shrink(struct vir_region
*vr
, vir_bytes delta
)
1390 assert(vr
->flags
& VR_ANON
);
1391 assert(!(delta
% VM_PAGE_SIZE
));
1394 printf("VM: ignoring region shrink\n");
/* map_region_lookup_tag: walk the process's region list looking for a region
 * whose tag matches 'tag'.
 * NOTE(review): extraction-damaged — K&R parameter declarations, the loop
 * body (presumably 'if(vr->tag == tag) return vr;') and the final NULL
 * return are missing from this view; restore before compiling.
 */
1400 PUBLIC
struct vir_region
*map_region_lookup_tag(vmp
, tag
)
1404 struct vir_region
*vr
;
1406 for(vr
= vmp
->vm_regions
; vr
; vr
= vr
->next
)
1413 PUBLIC
void map_region_set_tag(struct vir_region
*vr
, u32_t tag
)
1415 USE(vr
, vr
->tag
= tag
;);
1418 PUBLIC u32_t
map_region_get_tag(struct vir_region
*vr
)
1423 /*========================================================================*
1424 * map_unmap_region *
1425 *========================================================================*/
1426 PUBLIC
int map_unmap_region(struct vmproc
*vmp
, struct vir_region
*region
,
1429 /* Shrink the region by 'len' bytes, from the start. Unreference
1430 * memory it used to reference if any.
1432 struct vir_region
*r
, *nextr
, *prev
= NULL
;
1433 vir_bytes regionstart
;
1435 SANITYCHECK(SCL_FUNCTIONS
);
1437 for(r
= vmp
->vm_regions
; r
; r
= r
->next
) {
1444 SANITYCHECK(SCL_DETAIL
);
1447 panic("map_unmap_region: region not found");
1449 if(len
> r
->length
|| (len
% VM_PAGE_SIZE
)) {
1450 printf("VM: bogus length 0x%lx\n", len
);
1454 if(!(r
->flags
& (VR_ANON
|VR_DIRECT
))) {
1455 printf("VM: only unmap anonymous or direct memory\n");
1459 regionstart
= r
->vaddr
;
1461 if(len
== r
->length
) {
1462 /* Whole region disappears. Unlink and free it. */
1464 vmp
->vm_regions
= r
->next
;
1466 USE(prev
, prev
->next
= r
->next
;);
1470 struct phys_region
*pr
;
1472 /* Region shrinks. First unreference its memory
1473 * and then shrink the region.
1475 map_subfree(vmp
, r
, len
);
1479 physr_start_iter_least(r
->phys
, &iter
);
1481 /* vaddr has increased; to make all the phys_regions
1482 * point to the same addresses, make them shrink by the
1485 while((pr
= physr_get_iter(&iter
))) {
1486 assert(pr
->offset
>= len
);
1487 USE(pr
, pr
->offset
-= len
;);
1488 physr_incr_iter(&iter
);
1492 SANITYCHECK(SCL_DETAIL
);
1494 if(pt_writemap(&vmp
->vm_pt
, regionstart
,
1495 MAP_NONE
, len
, 0, WMF_OVERWRITE
) != OK
) {
1496 printf("VM: map_unmap_region: pt_writemap failed\n");
1500 SANITYCHECK(SCL_FUNCTIONS
);
1505 /*========================================================================*
1507 *========================================================================*/
1508 PUBLIC
int map_remap(struct vmproc
*dvmp
, vir_bytes da
, size_t size
,
1509 struct vir_region
*region
, vir_bytes
*r
)
1511 struct vir_region
*vr
, *prev
;
1512 struct phys_region
*ph
;
1513 vir_bytes startv
, dst_addr
;
1516 SANITYCHECK(SCL_FUNCTIONS
);
1518 assert(region
->flags
& VR_SHARED
);
1520 /* da is handled differently */
1522 dst_addr
= dvmp
->vm_stacktop
;
1525 dst_addr
= arch_vir2map(dvmp
, dst_addr
);
1528 /* round up to page size */
1529 assert(!(size
% VM_PAGE_SIZE
));
1530 startv
= region_find_slot(dvmp
, dst_addr
, VM_DATATOP
, size
, &prev
);
1531 if (startv
== (vir_bytes
) -1) {
1532 printf("map_remap: search 0x%x...\n", dst_addr
);
1536 /* when the user specifies the address, we cannot change it */
1537 if (da
&& (startv
!= dst_addr
))
1540 vr
= map_copy_region(dvmp
, region
);
1547 vr
->flags
= region
->flags
;
1549 vr
->parent
= dvmp
;);
1550 assert(vr
->flags
& VR_SHARED
);
1554 vr
->next
= prev
->next
;);
1555 USE(prev
, prev
->next
= vr
;);
1558 vr
->next
= dvmp
->vm_regions
;);
1559 dvmp
->vm_regions
= vr
;
1562 physr_start_iter_least(vr
->phys
, &iter
);
1563 while((ph
= physr_get_iter(&iter
))) {
1564 struct phys_block
*pb
= ph
->ph
;
1565 assert(!ph
->next_ph_list
);
1566 USE(ph
, ph
->next_ph_list
= pb
->firstregion
;);
1567 USE(pb
, pb
->firstregion
= ph
;);
1568 USE(pb
, pb
->refcount
++;);
1569 if(map_ph_writept(dvmp
, vr
, ph
) != OK
) {
1570 panic("map_remap: map_ph_writept failed");
1572 physr_incr_iter(&iter
);
1577 SANITYCHECK(SCL_FUNCTIONS
);
1582 /*========================================================================*
1584 *========================================================================*/
1585 PUBLIC
int map_get_phys(struct vmproc
*vmp
, vir_bytes addr
, phys_bytes
*r
)
1587 struct vir_region
*vr
;
1588 struct phys_region
*ph
;
1591 if (!(vr
= map_lookup(vmp
, addr
)) ||
1592 (vr
->vaddr
!= addr
))
1595 if (!(vr
->flags
& VR_SHARED
))
1598 physr_start_iter_least(vr
->phys
, &iter
);
1599 ph
= physr_get_iter(&iter
);
1609 /*========================================================================*
1611 *========================================================================*/
1612 PUBLIC
int map_get_ref(struct vmproc
*vmp
, vir_bytes addr
, u8_t
*cnt
)
1614 struct vir_region
*vr
;
1615 struct phys_region
*ph
;
1618 if (!(vr
= map_lookup(vmp
, addr
)) ||
1619 (vr
->vaddr
!= addr
))
1622 if (!(vr
->flags
& VR_SHARED
))
1625 physr_start_iter_least(vr
->phys
, &iter
);
1626 ph
= physr_get_iter(&iter
);
1631 *cnt
= ph
->ph
->refcount
;
1636 /*========================================================================*
1638 *========================================================================*/
1639 PUBLIC
void get_usage_info(struct vmproc
*vmp
, struct vm_usage_info
*vui
)
1641 struct vir_region
*vr
;
1643 struct phys_region
*ph
;
1646 memset(vui
, 0, sizeof(*vui
));
1648 for(vr
= vmp
->vm_regions
; vr
; vr
= vr
->next
) {
1649 physr_start_iter_least(vr
->phys
, &iter
);
1650 while((ph
= physr_get_iter(&iter
))) {
1651 len
= ph
->ph
->length
;
1653 /* All present pages are counted towards the total. */
1654 vui
->vui_total
+= len
;
1656 if (ph
->ph
->refcount
> 1) {
1657 /* Any page with a refcount > 1 is common. */
1658 vui
->vui_common
+= len
;
1660 /* Any common, non-COW page is shared. */
1661 if (vr
->flags
& VR_SHARED
||
1662 ph
->ph
->share_flag
== PBSH_SMAP
)
1663 vui
->vui_shared
+= len
;
1665 physr_incr_iter(&iter
);
1670 /*===========================================================================*
1672 *===========================================================================*/
1673 PUBLIC
int get_region_info(struct vmproc
*vmp
, struct vm_region_info
*vri
,
1674 int max
, vir_bytes
*nextp
)
1676 struct vir_region
*vr
;
1684 for(vr
= vmp
->vm_regions
; vr
; vr
= vr
->next
)
1685 if (vr
->vaddr
>= next
) break;
1689 for(count
= 0; vr
&& count
< max
; vr
= vr
->next
, count
++, vri
++) {
1690 vri
->vri_addr
= arch_map2info(vmp
, vr
->vaddr
, &vri
->vri_seg
,
1692 vri
->vri_length
= vr
->length
;
1694 /* "AND" the provided protection with per-page protection. */
1695 if (!(vr
->flags
& VR_WRITABLE
))
1696 vri
->vri_prot
&= ~PROT_WRITE
;
1698 vri
->vri_flags
= (vr
->flags
& VR_SHARED
) ? MAP_SHARED
: 0;
1700 next
= vr
->vaddr
+ vr
->length
;
1707 /*========================================================================*
1708 * regionprintstats *
1709 *========================================================================*/
1710 PUBLIC
void printregionstats(struct vmproc
*vmp
)
1712 struct vir_region
*vr
;
1713 struct phys_region
*pr
;
1715 vir_bytes used
= 0, weighted
= 0;
1717 for(vr
= vmp
->vm_regions
; vr
; vr
= vr
->next
) {
1718 if(vr
->flags
& VR_DIRECT
)
1720 physr_start_iter_least(vr
->phys
, &iter
);
1721 while((pr
= physr_get_iter(&iter
))) {
1722 physr_incr_iter(&iter
);
1723 used
+= pr
->ph
->length
;
1724 weighted
+= pr
->ph
->length
/ pr
->ph
->refcount
;
1728 printf("%6dkB %6dkB\n", used
/1024, weighted
/1024);
1733 /*===========================================================================*
1735 *===========================================================================*/
1736 PRIVATE
int do_map_memory(struct vmproc
*vms
, struct vmproc
*vmd
,
1737 struct vir_region
*vrs
, struct vir_region
*vrd
,
1738 vir_bytes offset_s
, vir_bytes offset_d
,
1739 vir_bytes length
, int flag
)
1741 struct phys_region
*prs
;
1742 struct phys_region
*newphysr
;
1743 struct phys_block
*pb
;
1745 u32_t pt_flag
= PTF_PRESENT
| PTF_USER
;
1748 SANITYCHECK(SCL_FUNCTIONS
);
1750 /* Search for the first phys region in the source process. */
1751 physr_start_iter(vrs
->phys
, &iter
, offset_s
, AVL_EQUAL
);
1752 prs
= physr_get_iter(&iter
);
1754 panic("do_map_memory: no aligned phys region: %d", 0);
1756 /* flag: 0 -> read-only
1758 * -1 -> share as COW, so read-only
1761 pt_flag
|= PTF_WRITE
;
1763 /* Map phys blocks in the source process to the destination process. */
1764 end
= offset_d
+ length
;
1765 while((prs
= physr_get_iter(&iter
)) && offset_d
< end
) {
1766 /* If a SMAP share was requested but the phys block has already
1767 * been shared as COW, copy the block for the source phys region
1771 if(flag
>= 0 && pb
->refcount
> 1
1772 && pb
->share_flag
== PBSH_COW
) {
1773 if(!(prs
= map_clone_ph_block(vms
, vrs
, prs
, &iter
)))
1778 /* Allocate a new phys region. */
1779 if(!SLABALLOC(newphysr
))
1782 /* Set and link the new phys region to the block. */
1784 newphysr
->offset
= offset_d
;
1785 newphysr
->parent
= vrd
;
1786 newphysr
->next_ph_list
= pb
->firstregion
;
1787 pb
->firstregion
= newphysr
;
1788 physr_insert(newphysr
->parent
->phys
, newphysr
);
1791 /* If a COW share was requested but the phys block has already
1792 * been shared as SMAP, give up on COW and copy the block for
1793 * the destination phys region now.
1795 if(flag
< 0 && pb
->refcount
> 1
1796 && pb
->share_flag
== PBSH_SMAP
) {
1797 if(!(newphysr
= map_clone_ph_block(vmd
, vrd
,
1803 /* See if this is a COW share or SMAP share. */
1804 if(flag
< 0) { /* COW share */
1805 pb
->share_flag
= PBSH_COW
;
1806 /* Update the page table for the src process. */
1807 pt_writemap(&vms
->vm_pt
, offset_s
+ vrs
->vaddr
,
1808 pb
->phys
, pb
->length
,
1809 pt_flag
, WMF_OVERWRITE
);
1811 else { /* SMAP share */
1812 pb
->share_flag
= PBSH_SMAP
;
1814 /* Update the page table for the destination process. */
1815 pt_writemap(&vmd
->vm_pt
, offset_d
+ vrd
->vaddr
,
1816 pb
->phys
, pb
->length
, pt_flag
, WMF_OVERWRITE
);
1819 physr_incr_iter(&iter
);
1820 offset_d
+= pb
->length
;
1821 offset_s
+= pb
->length
;
1824 SANITYCHECK(SCL_FUNCTIONS
);
1829 /*===========================================================================*
1831 *===========================================================================*/
1832 PUBLIC
int unmap_memory(endpoint_t sour
, endpoint_t dest
,
1833 vir_bytes virt_s
, vir_bytes virt_d
, vir_bytes length
, int flag
)
1836 struct vir_region
*vrd
;
1837 struct phys_region
*pr
;
1838 struct phys_block
*pb
;
1843 /* Use information on the destination process to unmap. */
1844 if(vm_isokendpt(dest
, &p
) != OK
)
1845 panic("unmap_memory: bad endpoint: %d", dest
);
1848 vrd
= map_lookup(vmd
, virt_d
);
1851 /* Search for the first phys region in the destination process. */
1852 off
= virt_d
- vrd
->vaddr
;
1853 physr_start_iter(vrd
->phys
, &iter
, off
, AVL_EQUAL
);
1854 pr
= physr_get_iter(&iter
);
1856 panic("unmap_memory: no aligned phys region: %d", 0);
1858 /* Copy the phys block now rather than doing COW. */
1860 while((pr
= physr_get_iter(&iter
)) && off
< end
) {
1862 assert(pb
->refcount
> 1);
1863 assert(pb
->share_flag
== PBSH_SMAP
);
1865 if(!(pr
= map_clone_ph_block(vmd
, vrd
, pr
, &iter
)))
1868 physr_incr_iter(&iter
);
1875 /*===========================================================================*
1877 *===========================================================================*/
1878 PRIVATE
int split_phys(struct phys_region
*pr
, vir_bytes point
)
1880 struct phys_region
*newpr
, *q
, *prev
;
1881 struct phys_block
*newpb
;
1882 struct phys_block
*pb
= pr
->ph
;
1883 /* Split the phys region into 2 parts by @point. */
1885 if(pr
->offset
>= point
|| pr
->offset
+ pb
->length
<= point
)
1887 if(!SLABALLOC(newpb
))
1890 /* Split phys block. */
1892 pb
->length
= point
- pr
->offset
;
1893 newpb
->length
-= pb
->length
;
1894 newpb
->phys
+= pb
->length
;
1896 /* Split phys regions in a list. */
1897 for(q
= pb
->firstregion
; q
; q
= q
->next_ph_list
) {
1898 if(!SLABALLOC(newpr
))
1903 newpr
->offset
+= pb
->length
;
1905 /* Link to the vir region's phys region list. */
1906 physr_insert(newpr
->parent
->phys
, newpr
);
1908 /* Link to the next_ph_list. */
1909 if(q
== pb
->firstregion
) {
1910 newpb
->firstregion
= newpr
;
1913 prev
->next_ph_list
= newpr
;
1917 prev
->next_ph_list
= NULL
;
1922 /*===========================================================================*
1923 * clean_phys_regions *
1924 *===========================================================================*/
1925 PRIVATE
void clean_phys_regions(struct vir_region
*region
,
1926 vir_bytes offset
, vir_bytes length
)
1928 /* Consider @offset as the start address and @offset+length as the end address.
1929 * If there are phys regions crossing the start address or the end address,
1930 * split them into 2 parts.
1932 * We assume that the phys regions are listed in order and don't overlap.
1934 struct phys_region
*pr
;
1937 physr_start_iter_least(region
->phys
, &iter
);
1938 while((pr
= physr_get_iter(&iter
))) {
1939 /* If this phys region crosses the start address, split it. */
1940 if(pr
->offset
< offset
1941 && pr
->offset
+ pr
->ph
->length
> offset
) {
1942 split_phys(pr
, offset
);
1943 physr_start_iter_least(region
->phys
, &iter
);
1945 /* If this phys region crosses the end address, split it. */
1946 else if(pr
->offset
< offset
+ length
1947 && pr
->offset
+ pr
->ph
->length
> offset
+ length
) {
1948 split_phys(pr
, offset
+ length
);
1949 physr_start_iter_least(region
->phys
, &iter
);
1952 physr_incr_iter(&iter
);
1957 /*===========================================================================*
1959 *===========================================================================*/
1960 PRIVATE
void rm_phys_regions(struct vir_region
*region
,
1961 vir_bytes begin
, vir_bytes length
)
1963 /* Remove all phys regions between @begin and @begin+length.
1965 * Don't update the page table, because we will update it at map_memory()
1968 struct phys_region
*pr
;
1971 physr_start_iter(region
->phys
, &iter
, begin
, AVL_GREATER_EQUAL
);
1972 while((pr
= physr_get_iter(&iter
)) && pr
->offset
< begin
+ length
) {
1973 pb_unreferenced(region
, pr
);
1974 physr_remove(region
->phys
, pr
->offset
);
1975 physr_start_iter(region
->phys
, &iter
, begin
,
1981 /*===========================================================================*
1983 *===========================================================================*/
1984 PUBLIC
int map_memory(endpoint_t sour
, endpoint_t dest
,
1985 vir_bytes virt_s
, vir_bytes virt_d
, vir_bytes length
, int flag
)
1987 /* This is the entry point. This function will be called by handle_memory() when
1988 * VM recieves a map-memory request.
1990 struct vmproc
*vms
, *vmd
;
1991 struct vir_region
*vrs
, *vrd
;
1993 vir_bytes offset_s
, offset_d
;
1997 if(vm_isokendpt(sour
, &p
) != OK
)
1998 panic("map_memory: bad endpoint: %d", sour
);
2000 if(vm_isokendpt(dest
, &p
) != OK
)
2001 panic("map_memory: bad endpoint: %d", dest
);
2004 vrs
= map_lookup(vms
, virt_s
);
2006 vrd
= map_lookup(vmd
, virt_d
);
2009 /* Linear address -> offset from start of vir region. */
2010 offset_s
= virt_s
- vrs
->vaddr
;
2011 offset_d
= virt_d
- vrd
->vaddr
;
2013 /* Make sure that the range in the source process has been mapped
2014 * to physical memory.
2016 map_handle_memory(vms
, vrs
, offset_s
, length
, 0);
2019 clean_phys_regions(vrs
, offset_s
, length
);
2020 clean_phys_regions(vrd
, offset_d
, length
);
2021 rm_phys_regions(vrd
, offset_d
, length
);
2024 r
= do_map_memory(vms
, vmd
, vrs
, vrd
, offset_s
, offset_d
, length
, flag
);
2029 /*========================================================================*
2031 *========================================================================*/
2033 map_lookup_phys(struct vmproc
*vmp
, u32_t tag
)
2035 struct vir_region
*vr
;
2036 struct phys_region
*pr
;
2039 if(!(vr
= map_region_lookup_tag(vmp
, tag
))) {
2040 printf("VM: request for phys of missing region\n");
2044 physr_start_iter_least(vr
->phys
, &iter
);
2046 if(!(pr
= physr_get_iter(&iter
))) {
2047 printf("VM: request for phys of unmapped region\n");
2051 if(pr
->offset
!= 0 || pr
->ph
->length
!= vr
->length
) {
2052 printf("VM: request for phys of partially mapped region\n");
2056 return pr
->ph
->phys
;