#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>

#include "sanitycheck.h"
/* Should a physblock be mapped writable? */
#define WRITABLE(r, pb) \
	(((r)->flags & (VR_DIRECT | VR_SHARED)) || \
	 (((r)->flags & VR_WRITABLE) && (pb)->refcount == 1))
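
/* Illustrative sketch, not part of the original file: how WRITABLE() feeds
 * the page-table flags. map_sanitycheck_pt() and map_ph_writept() below
 * derive their 'rw' bits exactly this way: shared and direct mappings stay
 * writable, while an anonymous writable page is only mapped writable while
 * it is unshared (refcount == 1), so that a write to a shared page faults
 * and triggers copy-on-write.
 */
#if 0	/* example only, not compiled */
static u32_t example_pt_flags(struct vir_region *r, struct phys_block *pb)
{
	return PTF_PRESENT | PTF_USER | (WRITABLE(r, pb) ? PTF_WRITE : 0);
}
#endif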
FORWARD _PROTOTYPE(int map_new_physblock, (struct vmproc *vmp,
	struct vir_region *region, vir_bytes offset, vir_bytes length,
	phys_bytes what, u32_t allocflags, int written));

FORWARD _PROTOTYPE(int map_ph_writept, (struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr));

FORWARD _PROTOTYPE(struct vir_region *map_copy_region, (struct vmproc *vmp,
	struct vir_region *vr));

FORWARD _PROTOTYPE(struct phys_region *map_clone_ph_block, (struct vmproc *vmp,
	struct vir_region *region, struct phys_region *ph, physr_iter *iter));
PRIVATE char *map_name(struct vir_region *vr)
{
	static char name[100];
	char *typename, *tag;
	int type = vr->flags & (VR_ANON|VR_DIRECT);

	switch(type) {
		case VR_ANON:
			typename = "anonymous";
			break;
		case VR_DIRECT:
			typename = "direct";
			break;
		default:
			panic("unknown mapping type: %d", type);
	}

	switch(vr->tag) {
		default:
			tag = "unknown tag value";
			break;
	}

	sprintf(name, "%s, %s", typename, tag);

	return name;
}
PUBLIC void map_printregion(struct vmproc *vmp, struct vir_region *vr)
{
	physr_iter iter;
	struct phys_region *ph;

	printf("map_printmap: map_name: %s\n", map_name(vr));
	printf("\t%s (len 0x%lx, %dkB), %s\n",
		arch_map2str(vmp, vr->vaddr), vr->length,
		vr->length/1024, map_name(vr));
	printf("\t\tphysblocks:\n");
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		printf("\t\t@ %s (refs %d): phys 0x%lx len 0x%lx\n",
			arch_map2str(vmp, vr->vaddr + ph->offset),
			ph->ph->refcount, ph->ph->phys, ph->ph->length);
		physr_incr_iter(&iter);
	}
}
/*===========================================================================*
 *				map_printmap				     *
 *===========================================================================*/
PUBLIC void map_printmap(vmp)
struct vmproc *vmp;
{
	struct vir_region *vr;

	printf("memory regions in process %d:\n", vmp->vm_endpoint);
	for(vr = vmp->vm_regions; vr; vr = vr->next) {
		map_printregion(vmp, vr);
	}
}
/*===========================================================================*
 *				map_sanitycheck_pt			     *
 *===========================================================================*/
PRIVATE int map_sanitycheck_pt(struct vmproc *vmp,
	struct vir_region *vr, struct phys_region *pr)
{
	struct phys_block *pb = pr->ph;
	int rw;
	int r;

	if(!(vmp->vm_flags & VMF_HASPT))
		return OK;

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = 0;

	r = pt_writemap(&vmp->vm_pt, vr->vaddr + pr->offset,
		pb->phys, pb->length, PTF_PRESENT | PTF_USER | rw, WMF_VERIFY);

	if(r != OK) {
		printf("proc %d phys_region 0x%lx sanity check failed\n",
			vmp->vm_endpoint, pr->offset);
		map_printregion(vmp, vr);
	}

	return r;
}
/*===========================================================================*
 *				map_sanitycheck				     *
 *===========================================================================*/
PUBLIC void map_sanitycheck(char *file, int line)
{
	struct vmproc *vmp;

/* Macro for looping over all physical blocks of all regions of
 * all processes.
 */
#define ALLREGIONS(regioncode, physcode)			\
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) {	\
		struct vir_region *vr;				\
		if(!(vmp->vm_flags & VMF_INUSE))		\
			continue;				\
		for(vr = vmp->vm_regions; vr; vr = vr->next) {	\
			physr_iter iter;			\
			struct phys_region *pr;			\
			regioncode;				\
			physr_start_iter_least(vr->phys, &iter); \
			while((pr = physr_get_iter(&iter))) {	\
				physcode;			\
				physr_incr_iter(&iter);		\
			}					\
		}						\
	}

#define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
	/* Basic pointers check. */
	ALLREGIONS(MYSLABSANE(vr),MYSLABSANE(pr); MYSLABSANE(pr->ph);MYSLABSANE(pr->parent));
	ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr->parent == vr););
	/* Do counting for consistency check. */
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
		if(pr->ph->seencount == 1) {
			MYASSERT(usedpages_add(pr->ph->phys,
				pr->ph->length) == OK);
		}
	);
	/* Do consistency check. */
	ALLREGIONS(if(vr->next) {
			MYASSERT(vr->vaddr < vr->next->vaddr);
			MYASSERT(vr->vaddr + vr->length <= vr->next->vaddr);
		}
		MYASSERT(!(vr->vaddr % VM_PAGE_SIZE));,
		if(pr->ph->refcount != pr->ph->seencount) {
			map_printmap(vmp);
			printf("ph in vr 0x%lx: 0x%lx-0x%lx  refcount %d "
				"but seencount %lu\n",
				vr, pr->offset,
				pr->offset + pr->ph->length,
				pr->ph->refcount, pr->ph->seencount);
		}
		{
			int n_others = 0;
			struct phys_region *others;
			if(pr->ph->refcount > 0) {
				MYASSERT(pr->ph->firstregion);
				if(pr->ph->refcount == 1) {
					MYASSERT(pr->ph->firstregion == pr);
				}
			} else {
				MYASSERT(!pr->ph->firstregion);
			}
			for(others = pr->ph->firstregion; others;
				others = others->next_ph_list) {
				MYSLABSANE(others);
				MYASSERT(others->ph == pr->ph);
				n_others++;
			}
			MYASSERT(pr->ph->refcount == n_others);
		}
		MYASSERT(pr->ph->refcount == pr->ph->seencount);
		MYASSERT(!(pr->offset % VM_PAGE_SIZE));
		MYASSERT(!(pr->ph->length % VM_PAGE_SIZE)););
	ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp, vr, pr) == OK));
}
/*=========================================================================*
 *				map_ph_writept				   *
 *=========================================================================*/
PRIVATE int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr)
{
	int rw;
	struct phys_block *pb = pr->ph;

	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(pb->length % VM_PAGE_SIZE));
	assert(!(pr->offset % VM_PAGE_SIZE));
	assert(pb->refcount > 0);

	if(WRITABLE(vr, pb))
		rw = PTF_WRITE;
	else
		rw = 0;

	if(pt_writemap(&vmp->vm_pt, vr->vaddr + pr->offset,
		pb->phys, pb->length, PTF_PRESENT | PTF_USER | rw,
		WMF_OVERWRITE) != OK) {
		printf("VM: map_writept: pt_writemap failed\n");
		return ENOMEM;
	}

	USE(pr, pr->written = 1;);

	return OK;
}
/*===========================================================================*
 *				region_find_slot			     *
 *===========================================================================*/
PRIVATE vir_bytes region_find_slot(struct vmproc *vmp,
		vir_bytes minv, vir_bytes maxv, vir_bytes length,
		struct vir_region **prev)
{
	struct vir_region *firstregion = vmp->vm_regions, *prevregion = NULL;
	vir_bytes startv;
	int foundflag = 0;

	SANITYCHECK(SCL_FUNCTIONS);

	/* We must be in paged mode to be able to do this. */
	assert(vm_paged);

	/* Length must be reasonable. */
	assert(length > 0);

	/* Special case: allow caller to set maxv to 0 meaning 'I want
	 * it to be mapped in right here.'
	 */
	if(maxv == 0) {
		maxv = minv + length;

		/* Sanity check. */
		if(maxv <= minv) {
			printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
				minv, length);
			return (vir_bytes) -1;
		}
	}

	/* Basic input sanity checks. */
	assert(!(length % VM_PAGE_SIZE));
	if(minv >= maxv) {
		printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
			minv, maxv, length);
	}
	assert(minv < maxv);
	assert(minv + length <= maxv);

#define FREEVRANGE(rangestart, rangeend, foundcode) {		\
	vir_bytes frstart = (rangestart), frend = (rangeend);	\
	frstart = MAX(frstart, minv);				\
	frend = MIN(frend, maxv);				\
	if(frend > frstart && (frend - frstart) >= length) {	\
		startv = frstart;				\
		foundflag = 1;					\
		foundcode;					\
	} }

	/* This is the free virtual address space before the first region. */
	FREEVRANGE(0, firstregion ? firstregion->vaddr : VM_DATATOP, ;);

	if(!foundflag) {
		struct vir_region *vr;
		for(vr = vmp->vm_regions; vr && !foundflag; vr = vr->next) {
			FREEVRANGE(vr->vaddr + vr->length,
				vr->next ? vr->next->vaddr : VM_DATATOP,
				prevregion = vr;);
		}
	}

	if(!foundflag) {
		printf("VM: region_find_slot: no 0x%lx bytes found for %d between 0x%lx and 0x%lx\n",
			length, vmp->vm_endpoint, minv, maxv);
		return (vir_bytes) -1;
	}

	if(prevregion) assert(prevregion->vaddr < startv);

	/* However we got it, startv must be in the requested range. */
	assert(startv >= minv);
	assert(startv < maxv);
	assert(startv + length <= maxv);

	*prev = prevregion;
	return startv;
}
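
/* Usage sketch (illustrative, not from the original file): callers either
 * search a whole range for a free slot, or pass maxv == 0 to demand the
 * exact address given in minv.
 */
#if 0	/* example only, not compiled */
static void example_find_slot(struct vmproc *vmp, vir_bytes addr, vir_bytes length)
{
	struct vir_region *prevregion;
	vir_bytes anyv, fixedv;

	/* First fit anywhere in the data range. */
	anyv = region_find_slot(vmp, 0, VM_DATATOP, length, &prevregion);

	/* Exactly at 'addr' (maxv == 0 means "right here"), or fail. */
	fixedv = region_find_slot(vmp, addr, 0, length, &prevregion);

	if(anyv == (vir_bytes) -1 || fixedv == (vir_bytes) -1) {
		/* no slot found */
	}
}
#endif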
/*===========================================================================*
 *				map_page_region				     *
 *===========================================================================*/
PUBLIC struct vir_region *map_page_region(vmp, minv, maxv, length,
	what, flags, mapflags)
struct vmproc *vmp;
vir_bytes minv;
vir_bytes maxv;
vir_bytes length;
vir_bytes what;
u32_t flags;
int mapflags;
{
	struct vir_region *prevregion = NULL, *newregion;
	vir_bytes startv;
	struct phys_region *ph;
	physr_avl *phavl;

	assert(!(length % VM_PAGE_SIZE));

	SANITYCHECK(SCL_FUNCTIONS);

	startv = region_find_slot(vmp, minv, maxv, length, &prevregion);
	if (startv == (vir_bytes) -1)
		return NULL;

	/* Now we want a new region. */
	if(!SLABALLOC(newregion)) {
		printf("VM: map_page_region: allocating region failed\n");
		return NULL;
	}

	/* Fill in node details. */
USE(newregion,
	newregion->vaddr = startv;
	newregion->length = length;
	newregion->flags = flags;
	newregion->tag = VRT_NONE;
	newregion->parent = vmp;);

	SLABALLOC(phavl);
	if(!phavl) {
		printf("VM: map_page_region: allocating phys avl failed\n");
		SLABFREE(newregion);
		return NULL;
	}
	USE(newregion, newregion->phys = phavl;);

	physr_init(newregion->phys);

	/* If we know what we're going to map to, map it right away. */
	if(what != MAP_NONE) {
		assert(what);	/* mapping in 0 is unlikely to be right */
		assert(!(what % VM_PAGE_SIZE));
		assert(!(startv % VM_PAGE_SIZE));
		assert(!(mapflags & MF_PREALLOC));
		if(map_new_physblock(vmp, newregion, 0, length,
			what, PAF_CLEAR, 0) != OK) {
			printf("VM: map_new_physblock failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	if((flags & VR_ANON) && (mapflags & MF_PREALLOC)) {
		if(map_handle_memory(vmp, newregion, 0, length, 1) != OK) {
			printf("VM: map_page_region: prealloc failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
			SLABFREE(newregion);
			return NULL;
		}
	}

	/* Link it. */
	if(prevregion) {
		assert(prevregion->vaddr < newregion->vaddr);
		USE(newregion, newregion->next = prevregion->next;);
		USE(prevregion, prevregion->next = newregion;);
	} else {
		USE(newregion, newregion->next = vmp->vm_regions;);
		vmp->vm_regions = newregion;
	}

	assert(startv == newregion->vaddr);
	if(newregion->next) {
		assert(newregion->vaddr < newregion->next->vaddr);
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return newregion;
}
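
/* Usage sketch (illustrative, not from the original file): an anonymous,
 * demand-paged region versus a direct physical mapping. MAP_NONE as 'what'
 * means no physical memory is attached yet; a concrete physical address is
 * mapped in immediately.
 */
#if 0	/* example only, not compiled */
static void example_map_page_region(struct vmproc *vmp, vir_bytes length,
	vir_bytes physaddr)
{
	struct vir_region *vr;

	/* Anonymous writable region; pages appear on first page fault. */
	vr = map_page_region(vmp, 0, VM_DATATOP, length,
		MAP_NONE, VR_ANON | VR_WRITABLE, 0);

	/* Direct mapping of existing physical memory, mapped right away. */
	vr = map_page_region(vmp, 0, VM_DATATOP, length,
		physaddr, VR_DIRECT | VR_WRITABLE, 0);
}
#endif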
/*===========================================================================*
 *				pb_unreferenced				     *
 *===========================================================================*/
PUBLIC void pb_unreferenced(struct vir_region *region, struct phys_region *pr)
{
	struct phys_block *pb;

	pb = pr->ph;
	assert(pb->refcount > 0);
	USE(pb, pb->refcount--;);
	assert(pb->refcount >= 0);

	if(pb->firstregion == pr) {
		USE(pb, pb->firstregion = pr->next_ph_list;);
	} else {
		struct phys_region *others;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
			if(others->next_ph_list == pr) {
				USE(others, others->next_ph_list = pr->next_ph_list;);
				break;
			}
		}

		assert(others); /* Otherwise, wasn't on the list. */
	}

	if(pb->refcount == 0) {
		assert(!pb->firstregion);
		if(region->flags & VR_ANON) {
			free_mem(ABS2CLICK(pb->phys),
				ABS2CLICK(pb->length));
		} else if(region->flags & VR_DIRECT) {
			; /* No action required. */
		} else {
			panic("strange phys flags");
		}
		SLABFREE(pb);
	} else {
		int n = 0;
		struct phys_region *others;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			if(WRITABLE(region, others->ph)) {
				if(map_ph_writept(others->parent->parent,
					others->parent, others) != OK) {
					printf("VM: map_ph_writept failed unexpectedly\n");
				}
			}
			n++;
		}
		assert(n == pb->refcount);
	}
}
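
/* Caller pattern sketch (illustrative, not from the original file):
 * pb_unreferenced() only drops the block reference; callers that discard
 * the phys_region also unlink it from the region's AVL tree and free the
 * slab object, as map_subfree() and rm_phys_regions() below do.
 */
#if 0	/* example only, not compiled */
static void example_drop_phys_region(struct vir_region *region,
	struct phys_region *pr)
{
	pb_unreferenced(region, pr);		/* drop refcount, maybe free RAM */
	physr_remove(region->phys, pr->offset);	/* unlink from the region's tree */
	SLABFREE(pr);				/* free the phys_region itself */
}
#endif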
PRIVATE struct phys_region *reset_physr_iter(struct vir_region *region,
	physr_iter *iter, vir_bytes offset)
{
	struct phys_region *ph;

	physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
	ph = physr_get_iter(iter);
	assert(ph);
	assert(ph->offset == offset);

	return ph;
}
/*===========================================================================*
 *				map_subfree				     *
 *===========================================================================*/
PRIVATE int map_subfree(struct vmproc *vmp,
	struct vir_region *region, vir_bytes len)
{
	struct phys_region *pr, *nextpr;
	physr_iter iter;

	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		struct phys_region *others;
		struct phys_block *pb;

		pb = pr->ph;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
		}
		physr_incr_iter(&iter);
	}

	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		physr_incr_iter(&iter);
		if(pr->offset >= len)
			break;
		if(pr->offset + pr->ph->length <= len) {
			pb_unreferenced(region, pr);
			physr_remove(region->phys, pr->offset);
			physr_start_iter_least(region->phys, &iter);
			SLABFREE(pr);
		} else {
			vir_bytes sublen;
			assert(len > pr->offset);
			assert(len < pr->offset + pr->ph->length);
			assert(pr->ph->refcount > 0);
			sublen = len - pr->offset;
			assert(!(sublen % VM_PAGE_SIZE));
			assert(sublen < pr->ph->length);
			if(pr->ph->refcount > 1) {
				if(!(pr = map_clone_ph_block(vmp, region,
					pr, &iter)))
					return ENOMEM;
			}
			assert(pr->ph->refcount == 1);
			if(!(region->flags & VR_DIRECT)) {
				free_mem(ABS2CLICK(pr->ph->phys), ABS2CLICK(sublen));
			}
			USE(pr, pr->offset += sublen;);
			USE(pr->ph,
				pr->ph->phys += sublen;
				pr->ph->length -= sublen;);
			assert(!(pr->offset % VM_PAGE_SIZE));
			assert(!(pr->ph->phys % VM_PAGE_SIZE));
			assert(!(pr->ph->length % VM_PAGE_SIZE));
		}
	}

	return OK;
}
/*===========================================================================*
 *				map_free				     *
 *===========================================================================*/
PRIVATE int map_free(struct vmproc *vmp, struct vir_region *region)
{
	int r;

	if((r=map_subfree(vmp, region, region->length)) != OK) {
		printf("%d\n", __LINE__);
		return r;
	}

	USE(region,
		SLABFREE(region->phys););
	SLABFREE(region);

	return OK;
}
/*========================================================================*
 *				map_free_proc				  *
 *========================================================================*/
PUBLIC int map_free_proc(vmp)
struct vmproc *vmp;
{
	struct vir_region *r, *nextr;

	SANITYCHECK(SCL_FUNCTIONS);

	for(r = vmp->vm_regions; r; r = nextr) {
		nextr = r->next;
		SANITYCHECK(SCL_DETAIL);
		map_free(vmp, r);
		vmp->vm_regions = nextr;	/* For sanity checks. */
		SANITYCHECK(SCL_DETAIL);
	}

	vmp->vm_regions = NULL;

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*===========================================================================*
 *				map_lookup				     *
 *===========================================================================*/
PUBLIC struct vir_region *map_lookup(vmp, offset)
struct vmproc *vmp;
vir_bytes offset;
{
	struct vir_region *r;

	SANITYCHECK(SCL_FUNCTIONS);

	if(!vmp->vm_regions)
		panic("process has no regions: %d", vmp->vm_endpoint);

	for(r = vmp->vm_regions; r; r = r->next) {
		if(offset >= r->vaddr && offset < r->vaddr + r->length)
			return r;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return NULL;
}
PRIVATE u32_t vrallocflags(u32_t flags)
{
	u32_t allocflags = 0;

	if(flags & VR_PHYS64K)
		allocflags |= PAF_ALIGN64K;
	if(flags & VR_LOWER16MB)
		allocflags |= PAF_LOWER16MB;
	if(flags & VR_LOWER1MB)
		allocflags |= PAF_LOWER1MB;
	if(flags & VR_CONTIG)
		allocflags |= PAF_CONTIG;

	return allocflags;
}
/*===========================================================================*
 *				map_new_physblock			     *
 *===========================================================================*/
PRIVATE int map_new_physblock(vmp, region, start_offset, length,
	what_mem, allocflags, written)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes start_offset;
vir_bytes length;
phys_bytes what_mem;
u32_t allocflags;
int written;
{
	struct memlist *memlist, given, *ml;
	int r;
	vir_bytes mapped = 0;
	vir_bytes offset = start_offset;

	SANITYCHECK(SCL_FUNCTIONS);

	assert(!(length % VM_PAGE_SIZE));

	if((region->flags & VR_CONTIG) &&
		(start_offset > 0 || length < region->length)) {
		printf("VM: map_new_physblock: non-full allocation requested\n");
		return EFAULT;
	}

	/* Memory for new physical block. */
	if(what_mem == MAP_NONE) {
		allocflags |= vrallocflags(region->flags);

		if(!(memlist = alloc_mem_in_list(length, allocflags))) {
			printf("map_new_physblock: couldn't allocate\n");
			return ENOMEM;
		}
	} else {
		given.phys = what_mem;
		given.length = length;
		given.next = NULL;
		memlist = &given;
		assert(given.length);
	}

	r = OK;

	for(ml = memlist; ml; ml = ml->next) {
		assert(ml->length > 0);
	}

	for(ml = memlist; ml; ml = ml->next) {
		struct phys_region *newphysr = NULL;
		struct phys_block *newpb = NULL;

		/* Allocate things necessary for this chunk of memory. */
		if(!SLABALLOC(newphysr) || !SLABALLOC(newpb)) {
			printf("map_new_physblock: no memory for the ph slabs\n");
			if(newphysr) SLABFREE(newphysr);
			if(newpb) SLABFREE(newpb);
			r = ENOMEM;
			break;
		}

		/* New physical block. */
		assert(!(ml->phys % VM_PAGE_SIZE));

		USE(newpb,
		newpb->phys = ml->phys;
		newpb->refcount = 1;
		newpb->length = ml->length;
		newpb->firstregion = newphysr;);

		/* New physical region. */
		USE(newphysr,
		newphysr->offset = offset;
		newphysr->ph = newpb;
		newphysr->parent = region;
		/* No other references to this block. */
		newphysr->next_ph_list = NULL;);

		USE(newphysr, newphysr->written = written;);

		/* Update pagetable. */
		if(map_ph_writept(vmp, region, newphysr) != OK) {
			printf("map_new_physblock: map_ph_writept failed\n");
			r = ENOMEM;
			break;
		}

		physr_insert(region->phys, newphysr);

		offset += ml->length;
		mapped += ml->length;
	}

	if(r != OK) {
		offset = start_offset;
		/* Things did not go well. Undo everything. */
		for(ml = memlist; ml; ml = ml->next) {
			struct phys_region *physr;
			offset += ml->length;
			if((physr = physr_search(region->phys, offset,
				AVL_EQUAL))) {
				assert(physr->ph->refcount == 1);
				pb_unreferenced(region, physr);
				physr_remove(region->phys, physr->offset);
				SLABFREE(physr);
			}
		}
	} else assert(mapped == length);

	if(memlist != &given) {
		/* Always clean up the memlist itself, even if everything
		 * worked we're not using the memlist nodes any more. And
		 * the memory they reference is either freed above or in use.
		 */
		free_mem_list(memlist, 0);
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return r;
}
/*===========================================================================*
 *				map_clone_ph_block			     *
 *===========================================================================*/
PRIVATE struct phys_region *map_clone_ph_block(vmp, region, ph, iter)
struct vmproc *vmp;
struct vir_region *region;
struct phys_region *ph;
physr_iter *iter;
{
	vir_bytes offset, length;
	u32_t allocflags;
	phys_bytes physaddr;
	struct phys_region *newpr;
	int written;

	written = ph->written;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Warning: this function will free the passed
	 * phys_region *ph and replace it (in the same offset)
	 * with one or more others! So both the pointer to it
	 * and any iterators over the phys_regions in the vir_region
	 * will be invalid on successful return. (Iterators over
	 * the vir_region could be invalid on unsuccessful return too.)
	 */

	/* This function takes a physical block, copies its contents
	 * into newly allocated memory, and replaces the single physical
	 * block by one or more physical blocks with refcount 1 with the
	 * same contents as the original. In other words, a fragmentable
	 * version of map_copy_ph_block().
	 */

	/* Remember where and how much. */
	offset = ph->offset;
	length = ph->ph->length;
	physaddr = ph->ph->phys;

	/* Now unlink the original physical block so we can replace
	 * it with new ones.
	 */

	SANITYCHECK(SCL_DETAIL);
	assert(ph->ph->refcount > 1);
	pb_unreferenced(region, ph);
	assert(ph->ph->refcount >= 1);
	physr_remove(region->phys, offset);
	SLABFREE(ph);

	SANITYCHECK(SCL_DETAIL);

	/* Put new free memory in. */
	allocflags = vrallocflags(region->flags);
	assert(!(allocflags & PAF_CONTIG));
	assert(!(allocflags & PAF_CLEAR));

	if(map_new_physblock(vmp, region, offset, length,
		MAP_NONE, allocflags, written) != OK) {
		/* XXX original range now gone. */
		printf("VM: map_clone_ph_block: map_new_physblock failed.\n");
		return NULL;
	}

	/* Copy the block to the new memory.
	 * Can only fail if map_new_physblock didn't do what we asked.
	 */
	if(copy_abs2region(physaddr, region, offset, length) != OK)
		panic("copy_abs2region failed, no good reason for that");

	newpr = physr_search(region->phys, offset, AVL_EQUAL);
	assert(newpr);
	assert(newpr->offset == offset);

	if(iter) {
		physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
		assert(physr_get_iter(iter) == newpr);
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return newpr;
}
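
/* Caller pattern sketch (illustrative, not from the original file): since
 * the passed phys_region and any iterators are invalidated, callers must
 * adopt the returned pointer and, if they passed an iterator, continue from
 * it, as map_subfree() and map_handle_memory() do.
 */
#if 0	/* example only, not compiled */
static int example_clone_caller(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *pr, physr_iter *iter)
{
	if(!(pr = map_clone_ph_block(vmp, region, pr, iter)))
		return ENOMEM;	/* the old pr is gone either way */

	/* pr and *iter now refer to the freshly allocated private copy. */
	return OK;
}
#endif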
/*===========================================================================*
 *				map_pf					     *
 *===========================================================================*/
PUBLIC int map_pf(vmp, region, offset, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset;
int write;
{
	vir_bytes virpage;
	struct phys_region *ph;
	int r = OK;

	assert(offset < region->length);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));

	virpage = offset - offset % VM_PAGE_SIZE;

	SANITYCHECK(SCL_FUNCTIONS);

	if((ph = physr_search(region->phys, offset, AVL_LESS_EQUAL)) &&
	   (ph->offset <= offset && offset < ph->offset + ph->ph->length)) {
		phys_bytes blockoffset = ph->offset;
		/* Pagefault in existing block. Do copy-on-write. */
		assert(write);
		assert(region->flags & VR_WRITABLE);
		assert(ph->ph->refcount > 0);

		if(WRITABLE(region, ph->ph)) {
			r = map_ph_writept(vmp, region, ph);
			if(r != OK)
				printf("map_ph_writept failed\n");
		} else {
			if(ph->ph->refcount > 0
				&& ph->ph->share_flag != PBSH_COW) {
				printf("VM: write RO mapped pages.\n");
				return EFAULT;
			} else {
				if(!map_clone_ph_block(vmp, region, ph, NULL))
					r = ENOMEM;
			}
		}
	} else {
		/* Pagefault in non-existing block. Map in new block. */
		if(map_new_physblock(vmp, region, virpage,
			VM_PAGE_SIZE, MAP_NONE, PAF_CLEAR, 0) != OK) {
			printf("map_new_physblock failed\n");
			r = ENOMEM;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(r != OK) {
		printf("VM: map_pf: failed (%d)\n", r);
		return r;
	}

	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+virpage,
		VM_PAGE_SIZE, write)) {
		panic("map_pf: pt_checkrange failed: %d", r);
	}

	return r;
}
/*===========================================================================*
 *				map_handle_memory			     *
 *===========================================================================*/
PUBLIC int map_handle_memory(vmp, region, offset, length, write)
struct vmproc *vmp;
struct vir_region *region;
vir_bytes offset, length;
int write;
{
	struct phys_region *physr, *nextphysr;
	int changes = 0;
	physr_iter iter;

#define FREE_RANGE_HERE(er1, er2) {					\
	struct phys_region *r1 = (er1), *r2 = (er2);			\
	vir_bytes start = offset, end = offset + length;		\
	if(r1) {							\
		start = MAX(start, r1->offset + r1->ph->length); }	\
	if(r2) {							\
		end = MIN(end, r2->offset); }				\
	if(start < end) {						\
		SANITYCHECK(SCL_DETAIL);				\
		if(map_new_physblock(vmp, region, start,		\
			end-start, MAP_NONE, PAF_CLEAR, 0) != OK) {	\
			SANITYCHECK(SCL_DETAIL);			\
			return ENOMEM;					\
		}							\
		changes++;						\
	} }

	SANITYCHECK(SCL_FUNCTIONS);

	assert(region->flags & VR_ANON);
	assert(!(region->vaddr % VM_PAGE_SIZE));
	assert(!(offset % VM_PAGE_SIZE));
	assert(!(length % VM_PAGE_SIZE));
	assert(!write || (region->flags & VR_WRITABLE));

	physr_start_iter(region->phys, &iter, offset, AVL_LESS_EQUAL);
	physr = physr_get_iter(&iter);

	if(!physr) {
		physr_start_iter(region->phys, &iter, offset, AVL_GREATER_EQUAL);
		physr = physr_get_iter(&iter);
	}

	FREE_RANGE_HERE(NULL, physr);

	if(physr) {
		physr = reset_physr_iter(region, &iter, physr->offset);
		if(physr->offset + physr->ph->length <= offset) {
			physr_incr_iter(&iter);
			physr = physr_get_iter(&iter);

			FREE_RANGE_HERE(NULL, physr);
			if(physr) {
				physr = reset_physr_iter(region, &iter,
					physr->offset);
			}
		}
	}

	while(physr) {
		int r;

		SANITYCHECK(SCL_DETAIL);

		if(write) {
			assert(physr->ph->refcount > 0);
			if(!WRITABLE(region, physr->ph)) {
				if(!(physr = map_clone_ph_block(vmp, region,
					physr, &iter))) {
					printf("VM: map_handle_memory: no copy\n");
					return ENOMEM;
				}
				changes++;
			} else {
				SANITYCHECK(SCL_DETAIL);
				if((r=map_ph_writept(vmp, region, physr)) != OK) {
					printf("VM: map_ph_writept failed\n");
					return r;
				}
				changes++;
				SANITYCHECK(SCL_DETAIL);
			}
		}

		SANITYCHECK(SCL_DETAIL);
		physr_incr_iter(&iter);
		nextphysr = physr_get_iter(&iter);
		FREE_RANGE_HERE(physr, nextphysr);
		SANITYCHECK(SCL_DETAIL);
		if(nextphysr) {
			if(nextphysr->offset >= offset + length)
				break;
			nextphysr = reset_physr_iter(region, &iter,
				nextphysr->offset);
		}
		physr = nextphysr;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	if(changes < 1) {
		printf("region start at 0x%lx offset 0x%lx len 0x%lx write %d\n",
			region->vaddr, offset, length, write);
		printf("no changes in map_handle_memory\n");
		return EFAULT;
	}

	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset, length, write)) {
		printf("handle mem %s-", arch_map2str(vmp, region->vaddr+offset));
		printf("%s failed\n", arch_map2str(vmp, region->vaddr+offset+length));
		map_printregion(vmp, region);
		panic("checkrange failed");
	}

	return OK;
}
static int countregions(struct vir_region *vr)
{
	int n = 0;
	struct phys_region *ph;
	physr_iter iter;

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		n++;
		physr_incr_iter(&iter);
	}

	return n;
}
/*===========================================================================*
 *				map_copy_region				     *
 *===========================================================================*/
PRIVATE struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region *vr)
{
	/* map_copy_region creates a complete copy of the vir_region
	 * data structure, linking in the same phys_blocks directly,
	 * but all in limbo, i.e., the caller has to link the vir_region
	 * to a process. Therefore it doesn't increase the refcount in
	 * the phys_block; the caller has to do this once it's linked.
	 * The reason for this is to keep the sanity checks working
	 * within this function.
	 */
	struct vir_region *newvr;
	struct phys_region *ph;
	physr_iter iter;
	physr_avl *phavl;
	int cr;

	cr = countregions(vr);

	if(!SLABALLOC(newvr))
		return NULL;
	SLABALLOC(phavl);
	if(!phavl) {
		SLABFREE(newvr);
		return NULL;
	}
	USE(newvr,
		*newvr = *vr;
		newvr->next = NULL;
		newvr->phys = phavl;);

	physr_init(newvr->phys);

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		struct phys_region *newph;
		if(!SLABALLOC(newph)) {
			map_free(vmp, newvr);
			return NULL;
		}
		USE(newph,
		newph->ph = ph->ph;
		newph->next_ph_list = NULL;
		newph->parent = newvr;
		newph->offset = ph->offset;);
		USE(newph, newph->written = 0;);
		physr_insert(newvr->phys, newph);
		assert(countregions(vr) == cr);
		physr_incr_iter(&iter);
	}

	assert(countregions(vr) == countregions(newvr));

	return newvr;
}
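
/* Caller obligation sketch (illustrative, not from the original file): after
 * map_copy_region(), the copy is "in limbo", so for every shared phys_block
 * the caller must link the new phys_region into the block's firstregion list
 * and bump the refcount, as map_proc_copy() and map_remap() below do.
 */
#if 0	/* example only, not compiled */
	USE(new_ph, new_ph->next_ph_list = pb->firstregion;);
	USE(pb, pb->firstregion = new_ph;);
	USE(pb, pb->refcount++;);
#endif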
/*===========================================================================*
 *				copy_abs2region				     *
 *===========================================================================*/
PUBLIC int copy_abs2region(phys_bytes abs, struct vir_region *destregion,
	phys_bytes offset, phys_bytes len)
{
	assert(destregion);
	assert(destregion->phys);
	while(len > 0) {
		phys_bytes sublen, suboffset;
		struct phys_region *ph;
		assert(destregion);
		assert(destregion->phys);
		if(!(ph = physr_search(destregion->phys, offset, AVL_LESS_EQUAL))) {
			printf("VM: copy_abs2region: no phys region found (1).\n");
			return EFAULT;
		}
		assert(ph->offset <= offset);
		if(ph->offset+ph->ph->length <= offset) {
			printf("VM: copy_abs2region: no phys region found (2).\n");
			return EFAULT;
		}
		suboffset = offset - ph->offset;
		assert(suboffset < ph->ph->length);
		sublen = len;
		if(sublen > ph->ph->length - suboffset)
			sublen = ph->ph->length - suboffset;
		assert(suboffset + sublen <= ph->ph->length);
		if(ph->ph->refcount != 1) {
			printf("VM: copy_abs2region: no phys region found (3).\n");
			return EFAULT;
		}

		if(sys_abscopy(abs, ph->ph->phys + suboffset, sublen) != OK) {
			printf("VM: copy_abs2region: abscopy failed.\n");
			return EFAULT;
		}
		abs += sublen;
		offset += sublen;
		len -= sublen;
	}

	return OK;
}
/*=========================================================================*
 *				map_writept				   *
 *=========================================================================*/
PUBLIC int map_writept(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *ph;
	physr_iter iter;
	int r;

	for(vr = vmp->vm_regions; vr; vr = vr->next) {
		physr_start_iter_least(vr->phys, &iter);
		while((ph = physr_get_iter(&iter))) {
			physr_incr_iter(&iter);

			/* If this phys block is shared as SMAP, then do
			 * not update the page table. */
			if(ph->ph->refcount > 1
				&& ph->ph->share_flag == PBSH_SMAP) {
				continue;
			}

			if((r=map_ph_writept(vmp, vr, ph)) != OK) {
				printf("VM: map_writept: failed\n");
				return r;
			}
		}
	}

	return OK;
}
/*========================================================================*
 *				map_proc_copy				  *
 *========================================================================*/
PUBLIC int map_proc_copy(dst, src)
struct vmproc *dst;
struct vmproc *src;
{
	struct vir_region *vr, *prevvr = NULL;

	dst->vm_regions = NULL;

	SANITYCHECK(SCL_FUNCTIONS);

	for(vr = src->vm_regions; vr; vr = vr->next) {
		physr_iter iter_orig, iter_new;
		struct vir_region *newvr;
		struct phys_region *orig_ph, *new_ph;
		if(!(newvr = map_copy_region(dst, vr))) {
			map_free_proc(dst);
			return ENOMEM;
		}
		USE(newvr, newvr->parent = dst;);
		if(prevvr) { USE(prevvr, prevvr->next = newvr;); }
		else { dst->vm_regions = newvr; }
		physr_start_iter_least(vr->phys, &iter_orig);
		physr_start_iter_least(newvr->phys, &iter_new);
		while((orig_ph = physr_get_iter(&iter_orig))) {
			struct phys_block *pb;
			new_ph = physr_get_iter(&iter_new);
			/* Check two physregions both are nonnull,
			 * are different, and match physblocks.
			 */
			assert(orig_ph);
			assert(new_ph);
			assert(orig_ph != new_ph);
			pb = orig_ph->ph;
			assert(pb == new_ph->ph);

			/* Link in new physregion. */
			assert(!new_ph->next_ph_list);
			USE(new_ph, new_ph->next_ph_list = pb->firstregion;);
			USE(pb, pb->firstregion = new_ph;);

			/* Increase phys block refcount */
			assert(pb->refcount > 0);
			USE(pb, pb->refcount++;);
			assert(pb->refcount > 1);

			/* If the phys block has been shared as SMAP,
			 * do the regular copy. */
			if(pb->refcount > 2 && pb->share_flag == PBSH_SMAP) {
				map_clone_ph_block(dst, newvr,new_ph,
					&iter_new);
			} else {
				USE(pb, pb->share_flag = PBSH_COW;);
			}

			/* Get next new physregion */
			physr_incr_iter(&iter_orig);
			physr_incr_iter(&iter_new);
		}
		assert(!physr_get_iter(&iter_new));
		prevvr = newvr;
	}

	map_writept(src);
	map_writept(dst);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*========================================================================*
 *				map_proc_kernel				  *
 *========================================================================*/
PUBLIC struct vir_region *map_proc_kernel(struct vmproc *vmp)
{
	struct vir_region *vr;

	/* We assume these are the first regions to be mapped to
	 * make the function a bit simpler (free all regions on error).
	 */
	assert(!vmp->vm_regions);
	assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
	assert(!(KERNEL_TEXT % VM_PAGE_SIZE));
	assert(!(KERNEL_TEXT_LEN % VM_PAGE_SIZE));
	assert(!(KERNEL_DATA % VM_PAGE_SIZE));
	assert(!(KERNEL_DATA_LEN % VM_PAGE_SIZE));

	if(!(vr = map_page_region(vmp, KERNEL_TEXT, 0, KERNEL_TEXT_LEN,
		KERNEL_TEXT, VR_DIRECT | VR_WRITABLE | VR_NOPF, 0)) ||
	   !(vr = map_page_region(vmp, KERNEL_DATA, 0, KERNEL_DATA_LEN,
		KERNEL_DATA, VR_DIRECT | VR_WRITABLE | VR_NOPF, 0))) {
		map_free_proc(vmp);
		return NULL;
	}

	return vr;	/* Return pointer not useful, just non-NULL. */
}
/*========================================================================*
 *				map_region_extend			  *
 *========================================================================*/
PUBLIC int map_region_extend(struct vmproc *vmp, struct vir_region *vr,
	vir_bytes delta)
{
	vir_bytes end;

	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));

	if(!delta) return OK;
	end = vr->vaddr + vr->length;
	assert(end >= vr->vaddr);

	if(end + delta <= end) {
		printf("VM: strange delta 0x%lx\n", delta);
		return ENOMEM;
	}

	if(!vr->next || end + delta <= vr->next->vaddr) {
		USE(vr, vr->length += delta;);
		return OK;
	}

	map_printmap(vmp);

	return ENOMEM;
}
/*========================================================================*
 *				map_region_shrink			  *
 *========================================================================*/
PUBLIC int map_region_shrink(struct vir_region *vr, vir_bytes delta)
{
	assert(vr);
	assert(vr->flags & VR_ANON);
	assert(!(delta % VM_PAGE_SIZE));

	printf("VM: ignoring region shrink\n");

	return OK;
}
PUBLIC struct vir_region *map_region_lookup_tag(vmp, tag)
struct vmproc *vmp;
u32_t tag;
{
	struct vir_region *vr;

	for(vr = vmp->vm_regions; vr; vr = vr->next)
		if(vr->tag == tag)
			return vr;

	return NULL;
}

PUBLIC void map_region_set_tag(struct vir_region *vr, u32_t tag)
{
	USE(vr, vr->tag = tag;);
}

PUBLIC u32_t map_region_get_tag(struct vir_region *vr)
{
	return vr->tag;
}
/*========================================================================*
 *				map_unmap_region			  *
 *========================================================================*/
PUBLIC int map_unmap_region(struct vmproc *vmp, struct vir_region *region,
	vir_bytes len)
{
/* Shrink the region by 'len' bytes, from the start. Unreference
 * memory it used to reference if any.
 */
	struct vir_region *r, *nextr, *prev = NULL;
	vir_bytes regionstart;

	SANITYCHECK(SCL_FUNCTIONS);

	for(r = vmp->vm_regions; r; r = r->next) {
		if(r == region)
			break;
		prev = r;
	}

	SANITYCHECK(SCL_DETAIL);

	if(r == NULL)
		panic("map_unmap_region: region not found");

	if(len > r->length || (len % VM_PAGE_SIZE)) {
		printf("VM: bogus length 0x%lx\n", len);
		return EINVAL;
	}

	if(!(r->flags & (VR_ANON|VR_DIRECT))) {
		printf("VM: only unmap anonymous or direct memory\n");
		return EINVAL;
	}

	regionstart = r->vaddr;

	if(len == r->length) {
		/* Whole region disappears. Unlink and free it. */
		if(!prev) {
			vmp->vm_regions = r->next;
		} else {
			USE(prev, prev->next = r->next;);
		}
		map_free(vmp, r);
	} else {
		struct phys_region *pr;
		physr_iter iter;
		/* Region shrinks. First unreference its memory
		 * and then shrink the region.
		 */
		map_subfree(vmp, r, len);
		USE(r,
			r->vaddr += len;
			r->length -= len;);
		physr_start_iter_least(r->phys, &iter);

		/* vaddr has increased; to make all the phys_regions
		 * point to the same addresses, make them shrink by the
		 * same amount.
		 */
		while((pr = physr_get_iter(&iter))) {
			assert(pr->offset >= len);
			USE(pr, pr->offset -= len;);
			physr_incr_iter(&iter);
		}
	}

	SANITYCHECK(SCL_DETAIL);

	if(pt_writemap(&vmp->vm_pt, regionstart,
	  MAP_NONE, len, 0, WMF_OVERWRITE) != OK) {
		printf("VM: map_unmap_region: pt_writemap failed\n");
		return ENOMEM;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*========================================================================*
 *				map_remap				  *
 *========================================================================*/
PUBLIC int map_remap(struct vmproc *dvmp, vir_bytes da, size_t size,
	struct vir_region *region, vir_bytes *r)
{
	struct vir_region *vr, *prev;
	struct phys_region *ph;
	vir_bytes startv, dst_addr;
	physr_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	assert(region->flags & VR_SHARED);

	/* da is handled differently */
	if (!da)
		dst_addr = dvmp->vm_stacktop;
	else
		dst_addr = da;
	dst_addr = arch_vir2map(dvmp, dst_addr);

	prev = NULL;
	/* round up to page size */
	assert(!(size % VM_PAGE_SIZE));
	startv = region_find_slot(dvmp, dst_addr, VM_DATATOP, size, &prev);
	if (startv == (vir_bytes) -1) {
		printf("map_remap: search 0x%x...\n", dst_addr);
		map_printmap(dvmp);
		return ENOMEM;
	}
	/* when the user specifies the address, we cannot change it */
	if (da && (startv != dst_addr))
		return EINVAL;

	vr = map_copy_region(dvmp, region);
	if(!vr)
		return ENOMEM;

	USE(vr,
		vr->vaddr = startv;
		vr->length = size;
		vr->flags = region->flags;
		vr->parent = dvmp;);
	assert(vr->flags & VR_SHARED);

	if (prev) {
		USE(vr,
			vr->next = prev->next;);
		USE(prev, prev->next = vr;);
	} else {
		USE(vr,
			vr->next = dvmp->vm_regions;);
		dvmp->vm_regions = vr;
	}

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		struct phys_block *pb = ph->ph;
		assert(!ph->next_ph_list);
		USE(ph, ph->next_ph_list = pb->firstregion;);
		USE(pb, pb->firstregion = ph;);
		USE(pb, pb->refcount++;);
		if(map_ph_writept(dvmp, vr, ph) != OK) {
			panic("map_remap: map_ph_writept failed");
		}
		physr_incr_iter(&iter);
	}

	*r = startv;

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
/*========================================================================*
 *				map_get_phys				  *
 *========================================================================*/
PUBLIC int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
{
	struct vir_region *vr;
	struct phys_region *ph;
	physr_iter iter;

	if (!(vr = map_lookup(vmp, addr)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!(vr->flags & VR_SHARED))
		return EINVAL;

	physr_start_iter_least(vr->phys, &iter);
	ph = physr_get_iter(&iter);

	assert(ph);
	assert(ph->ph);
	if(r)
		*r = ph->ph->phys;

	return OK;
}
/*========================================================================*
 *				map_get_ref				  *
 *========================================================================*/
PUBLIC int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
{
	struct vir_region *vr;
	struct phys_region *ph;
	physr_iter iter;

	if (!(vr = map_lookup(vmp, addr)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!(vr->flags & VR_SHARED))
		return EINVAL;

	physr_start_iter_least(vr->phys, &iter);
	ph = physr_get_iter(&iter);

	assert(ph);
	assert(ph->ph);
	if(cnt)
		*cnt = ph->ph->refcount;

	return OK;
}
/*========================================================================*
 *				get_usage_info				  *
 *========================================================================*/
PUBLIC void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
{
	struct vir_region *vr;
	physr_iter iter;
	struct phys_region *ph;
	vir_bytes len;

	memset(vui, 0, sizeof(*vui));

	for(vr = vmp->vm_regions; vr; vr = vr->next) {
		physr_start_iter_least(vr->phys, &iter);
		while((ph = physr_get_iter(&iter))) {
			len = ph->ph->length;

			/* All present pages are counted towards the total. */
			vui->vui_total += len;

			if (ph->ph->refcount > 1) {
				/* Any page with a refcount > 1 is common. */
				vui->vui_common += len;

				/* Any common, non-COW page is shared. */
				if (vr->flags & VR_SHARED ||
					ph->ph->share_flag == PBSH_SMAP)
					vui->vui_shared += len;
			}
			physr_incr_iter(&iter);
		}
	}
}
/*===========================================================================*
 *				get_region_info				     *
 *===========================================================================*/
PUBLIC int get_region_info(struct vmproc *vmp, struct vm_region_info *vri,
	int max, vir_bytes *nextp)
{
	struct vir_region *vr;
	vir_bytes next;
	int count;

	next = *nextp;

	if (!max) return 0;

	for(vr = vmp->vm_regions; vr; vr = vr->next)
		if (vr->vaddr >= next) break;

	if (!vr) return 0;

	for(count = 0; vr && count < max; vr = vr->next, count++, vri++) {
		vri->vri_addr = arch_map2info(vmp, vr->vaddr, &vri->vri_seg,
			&vri->vri_prot);
		vri->vri_length = vr->length;

		/* "AND" the provided protection with per-page protection. */
		if (!(vr->flags & VR_WRITABLE))
			vri->vri_prot &= ~PROT_WRITE;

		vri->vri_flags = (vr->flags & VR_SHARED) ? MAP_SHARED : 0;

		next = vr->vaddr + vr->length;
	}

	*nextp = next;
	return count;
}
/*========================================================================*
 *				printregionstats			  *
 *========================================================================*/
PUBLIC void printregionstats(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *pr;
	physr_iter iter;
	vir_bytes used = 0, weighted = 0;

	for(vr = vmp->vm_regions; vr; vr = vr->next) {
		if(vr->flags & VR_DIRECT)
			continue;
		physr_start_iter_least(vr->phys, &iter);
		while((pr = physr_get_iter(&iter))) {
			physr_incr_iter(&iter);
			used += pr->ph->length;
			weighted += pr->ph->length / pr->ph->refcount;
		}
	}

	printf("%6dkB  %6dkB\n", used/1024, weighted/1024);

	return;
}
/*===========================================================================*
 *				do_map_memory				     *
 *===========================================================================*/
PRIVATE int do_map_memory(struct vmproc *vms, struct vmproc *vmd,
	struct vir_region *vrs, struct vir_region *vrd,
	vir_bytes offset_s, vir_bytes offset_d,
	vir_bytes length, int flag)
{
	struct phys_region *prs;
	struct phys_region *newphysr;
	struct phys_block *pb;
	physr_iter iter;
	u32_t pt_flag = PTF_PRESENT | PTF_USER;
	vir_bytes end;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Search for the first phys region in the source process. */
	physr_start_iter(vrs->phys, &iter, offset_s, AVL_EQUAL);
	prs = physr_get_iter(&iter);
	if(!prs)
		panic("do_map_memory: no aligned phys region: %d", 0);

	/* flag: 0 -> read-only
	 *       1 -> writable
	 *      -1 -> share as COW, so read-only
	 */
	if(flag > 0)
		pt_flag |= PTF_WRITE;

	/* Map phys blocks in the source process to the destination process. */
	end = offset_d + length;
	while((prs = physr_get_iter(&iter)) && offset_d < end) {
		/* If a SMAP share was requested but the phys block has already
		 * been shared as COW, copy the block for the source phys region
		 * first.
		 */
		pb = prs->ph;
		if(flag >= 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_COW) {
			if(!(prs = map_clone_ph_block(vms, vrs, prs, &iter)))
				return ENOMEM;
			pb = prs->ph;
		}

		/* Allocate a new phys region. */
		if(!SLABALLOC(newphysr))
			return ENOMEM;

		/* Set and link the new phys region to the block. */
		newphysr->ph = pb;
		newphysr->offset = offset_d;
		newphysr->parent = vrd;
		newphysr->next_ph_list = pb->firstregion;
		pb->firstregion = newphysr;
		physr_insert(newphysr->parent->phys, newphysr);
		pb->refcount++;

		/* If a COW share was requested but the phys block has already
		 * been shared as SMAP, give up on COW and copy the block for
		 * the destination phys region now.
		 */
		if(flag < 0 && pb->refcount > 1
			&& pb->share_flag == PBSH_SMAP) {
			if(!(newphysr = map_clone_ph_block(vmd, vrd,
				newphysr, NULL)))
				return ENOMEM;
		}
		else {
			/* See if this is a COW share or SMAP share. */
			if(flag < 0) {			/* COW share */
				pb->share_flag = PBSH_COW;
				/* Update the page table for the src process. */
				pt_writemap(&vms->vm_pt, offset_s + vrs->vaddr,
					pb->phys, pb->length,
					pt_flag, WMF_OVERWRITE);
			}
			else {				/* SMAP share */
				pb->share_flag = PBSH_SMAP;
			}
			/* Update the page table for the destination process. */
			pt_writemap(&vmd->vm_pt, offset_d + vrd->vaddr,
				pb->phys, pb->length, pt_flag, WMF_OVERWRITE);
		}

		physr_incr_iter(&iter);
		offset_d += pb->length;
		offset_s += pb->length;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
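
/* Flag semantics sketch (illustrative, not from the original file), matching
 * the comment above: flag > 0 shares writable (SMAP), flag == 0 shares
 * read-only, and flag < 0 shares copy-on-write, which is also mapped
 * read-only until a write fault copies the block.
 */
#if 0	/* example only, not compiled */
	r = do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, -1); /* COW */
	r = do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, 0);	/* read-only SMAP */
	r = do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, 1);	/* writable SMAP */
#endif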
/*===========================================================================*
 *				unmap_memory				     *
 *===========================================================================*/
PUBLIC int unmap_memory(endpoint_t sour, endpoint_t dest,
	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
{
	struct vmproc *vmd;
	struct vir_region *vrd;
	struct phys_region *pr;
	struct phys_block *pb;
	physr_iter iter;
	vir_bytes off, end;
	int p;

	/* Use information on the destination process to unmap. */
	if(vm_isokendpt(dest, &p) != OK)
		panic("unmap_memory: bad endpoint: %d", dest);
	vmd = &vmproc[p];

	vrd = map_lookup(vmd, virt_d);
	assert(vrd);

	/* Search for the first phys region in the destination process. */
	off = virt_d - vrd->vaddr;
	physr_start_iter(vrd->phys, &iter, off, AVL_EQUAL);
	pr = physr_get_iter(&iter);
	if(!pr)
		panic("unmap_memory: no aligned phys region: %d", 0);

	/* Copy the phys block now rather than doing COW. */
	end = off + length;
	while((pr = physr_get_iter(&iter)) && off < end) {
		pb = pr->ph;
		assert(pb->refcount > 1);
		assert(pb->share_flag == PBSH_SMAP);

		if(!(pr = map_clone_ph_block(vmd, vrd, pr, &iter)))
			return ENOMEM;

		physr_incr_iter(&iter);
		off += pb->length;
	}

	return OK;
}
/*===========================================================================*
 *				split_phys				     *
 *===========================================================================*/
PRIVATE int split_phys(struct phys_region *pr, vir_bytes point)
{
	struct phys_region *newpr, *q, *prev;
	struct phys_block *newpb;
	struct phys_block *pb = pr->ph;
/* Split the phys region into 2 parts by @point. */

	if(pr->offset >= point || pr->offset + pb->length <= point)
		return OK;

	if(!SLABALLOC(newpb))
		return ENOMEM;

	/* Split phys block. */
	*newpb = *pb;
	pb->length = point - pr->offset;
	newpb->length -= pb->length;
	newpb->phys += pb->length;

	/* Split phys regions in a list. */
	for(q = pb->firstregion; q; q = q->next_ph_list) {
		if(!SLABALLOC(newpr))
			return ENOMEM;

		*newpr = *q;
		newpr->ph = newpb;
		newpr->offset += pb->length;

		/* Link to the vir region's phys region list. */
		physr_insert(newpr->parent->phys, newpr);

		/* Link to the next_ph_list. */
		if(q == pb->firstregion) {
			newpb->firstregion = newpr;
			prev = newpr;
		} else {
			prev->next_ph_list = newpr;
			prev = newpr;
		}
	}
	prev->next_ph_list = NULL;

	return OK;
}
/*===========================================================================*
 *				clean_phys_regions			     *
 *===========================================================================*/
PRIVATE void clean_phys_regions(struct vir_region *region,
	vir_bytes offset, vir_bytes length)
{
/* Consider @offset as the start address and @offset+length as the end address.
 * If there are phys regions crossing the start address or the end address,
 * split them into 2 parts.
 *
 * We assume that the phys regions are listed in order and don't overlap.
 */
	struct phys_region *pr;
	physr_iter iter;

	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
		/* If this phys region crosses the start address, split it. */
		if(pr->offset < offset
			&& pr->offset + pr->ph->length > offset) {
			split_phys(pr, offset);
			physr_start_iter_least(region->phys, &iter);
		}
		/* If this phys region crosses the end address, split it. */
		else if(pr->offset < offset + length
			&& pr->offset + pr->ph->length > offset + length) {
			split_phys(pr, offset + length);
			physr_start_iter_least(region->phys, &iter);
		}
		else
			physr_incr_iter(&iter);
	}
}
/*===========================================================================*
 *				rm_phys_regions				     *
 *===========================================================================*/
PRIVATE void rm_phys_regions(struct vir_region *region,
	vir_bytes begin, vir_bytes length)
{
/* Remove all phys regions between @begin and @begin+length.
 *
 * Don't update the page table, because we will update it at map_memory()
 * later.
 */
	struct phys_region *pr;
	physr_iter iter;

	physr_start_iter(region->phys, &iter, begin, AVL_GREATER_EQUAL);
	while((pr = physr_get_iter(&iter)) && pr->offset < begin + length) {
		pb_unreferenced(region, pr);
		physr_remove(region->phys, pr->offset);
		physr_start_iter(region->phys, &iter, begin,
			AVL_GREATER_EQUAL);
		SLABFREE(pr);
	}
}
1993 *===========================================================================*/
1994 PUBLIC
int map_memory(endpoint_t sour
, endpoint_t dest
,
1995 vir_bytes virt_s
, vir_bytes virt_d
, vir_bytes length
, int flag
)
1997 /* This is the entry point. This function will be called by handle_memory() when
1998 * VM recieves a map-memory request.
2000 struct vmproc
*vms
, *vmd
;
2001 struct vir_region
*vrs
, *vrd
;
2003 vir_bytes offset_s
, offset_d
;
2007 if(vm_isokendpt(sour
, &p
) != OK
)
2008 panic("map_memory: bad endpoint: %d", sour
);
2010 if(vm_isokendpt(dest
, &p
) != OK
)
2011 panic("map_memory: bad endpoint: %d", dest
);
2014 vrs
= map_lookup(vms
, virt_s
);
2016 vrd
= map_lookup(vmd
, virt_d
);
2019 /* Linear address -> offset from start of vir region. */
2020 offset_s
= virt_s
- vrs
->vaddr
;
2021 offset_d
= virt_d
- vrd
->vaddr
;
2023 /* Make sure that the range in the source process has been mapped
2024 * to physical memory.
2026 map_handle_memory(vms
, vrs
, offset_s
, length
, 0);
2029 clean_phys_regions(vrs
, offset_s
, length
);
2030 clean_phys_regions(vrd
, offset_d
, length
);
2031 rm_phys_regions(vrd
, offset_d
, length
);
2034 r
= do_map_memory(vms
, vmd
, vrs
, vrd
, offset_s
, offset_d
, length
, flag
);
/*========================================================================*
 *				map_lookup_phys				  *
 *========================================================================*/
PUBLIC phys_bytes
map_lookup_phys(struct vmproc *vmp, u32_t tag)
{
	struct vir_region *vr;
	struct phys_region *pr;
	physr_iter iter;

	if(!(vr = map_region_lookup_tag(vmp, tag))) {
		printf("VM: request for phys of missing region\n");
		return MAP_NONE;
	}

	physr_start_iter_least(vr->phys, &iter);

	if(!(pr = physr_get_iter(&iter))) {
		printf("VM: request for phys of unmapped region\n");
		return MAP_NONE;
	}

	if(pr->offset != 0 || pr->ph->length != vr->length) {
		printf("VM: request for phys of partially mapped region\n");
		return MAP_NONE;
	}

	return pr->ph->phys;
}