3 #include <minix/callnr.h>
4 #include <minix/type.h>
5 #include <minix/config.h>
6 #include <minix/const.h>
7 #include <minix/sysutil.h>
8 #include <minix/syslib.h>
9 #include <minix/debug.h>
10 #include <minix/bitmap.h>
11 #include <minix/hash.h>
12 #include <machine/multiboot.h>
21 #include <sys/param.h>
28 #include "sanitycheck.h"
31 #include "regionavl.h"
33 static struct vir_region
*map_copy_region(struct vmproc
*vmp
, struct
/* Initialize the region module. There is currently no global state to
 * set up; this hook exists for symmetry with the other VM subsystems.
 */
void map_region_init(void)
{
}
40 static void map_printregion(struct vir_region
*vr
)
43 struct phys_region
*ph
;
44 printf("map_printmap: map_name: %s\n", vr
->def_memtype
->name
);
45 printf("\t%lx (len 0x%lx, %lukB), %p, %s\n",
46 vr
->vaddr
, vr
->length
, vr
->length
/1024,
47 vr
->def_memtype
->name
,
48 (vr
->flags
& VR_WRITABLE
) ? "writable" : "readonly");
49 printf("\t\tphysblocks:\n");
50 for(i
= 0; i
< vr
->length
/VM_PAGE_SIZE
; i
++) {
51 if(!(ph
=vr
->physblocks
[i
])) continue;
52 printf("\t\t@ %lx (refs %d): phys 0x%lx, %s\n",
53 (vr
->vaddr
+ ph
->offset
),
54 ph
->ph
->refcount
, ph
->ph
->phys
,
55 pt_writable(vr
->parent
, vr
->vaddr
+ ph
->offset
) ? "W" : "R");
60 struct phys_region
*physblock_get(struct vir_region
*region
, vir_bytes offset
)
63 struct phys_region
*foundregion
;
64 assert(!(offset
% VM_PAGE_SIZE
));
65 assert( /* offset >= 0 && */ offset
< region
->length
);
66 i
= offset
/VM_PAGE_SIZE
;
67 if((foundregion
= region
->physblocks
[i
]))
68 assert(foundregion
->offset
== offset
);
72 void physblock_set(struct vir_region
*region
, vir_bytes offset
,
73 struct phys_region
*newphysr
)
77 assert(!(offset
% VM_PAGE_SIZE
));
78 assert( /* offset >= 0 && */ offset
< region
->length
);
79 i
= offset
/VM_PAGE_SIZE
;
80 proc
= region
->parent
;
83 assert(!region
->physblocks
[i
]);
84 assert(newphysr
->offset
== offset
);
85 proc
->vm_total
+= VM_PAGE_SIZE
;
86 if (proc
->vm_total
> proc
->vm_total_max
)
87 proc
->vm_total_max
= proc
->vm_total
;
89 assert(region
->physblocks
[i
]);
90 proc
->vm_total
-= VM_PAGE_SIZE
;
92 region
->physblocks
[i
] = newphysr
;
95 /*===========================================================================*
97 *===========================================================================*/
98 void map_printmap(struct vmproc
*vmp
)
100 struct vir_region
*vr
;
103 printf("memory regions in process %d:\n", vmp
->vm_endpoint
);
105 region_start_iter_least(&vmp
->vm_regions_avl
, &iter
);
106 while((vr
= region_get_iter(&iter
))) {
108 region_incr_iter(&iter
);
112 static struct vir_region
*getnextvr(struct vir_region
*vr
)
114 struct vir_region
*nextvr
;
117 region_start_iter(&vr
->parent
->vm_regions_avl
, &v_iter
, vr
->vaddr
, AVL_EQUAL
);
118 assert(region_get_iter(&v_iter
));
119 assert(region_get_iter(&v_iter
) == vr
);
120 region_incr_iter(&v_iter
);
121 nextvr
= region_get_iter(&v_iter
);
122 if(!nextvr
) return NULL
;
124 assert(vr
->parent
== nextvr
->parent
);
125 assert(vr
->vaddr
< nextvr
->vaddr
);
126 assert(vr
->vaddr
+ vr
->length
<= nextvr
->vaddr
);
130 static int pr_writable(struct vir_region
*vr
, struct phys_region
*pr
)
132 assert(pr
->memtype
->writable
);
133 return ((vr
->flags
& VR_WRITABLE
) && pr
->memtype
->writable(pr
));
138 /*===========================================================================*
139 * map_sanitycheck_pt *
140 *===========================================================================*/
141 static int map_sanitycheck_pt(struct vmproc
*vmp
,
142 struct vir_region
*vr
, struct phys_region
*pr
)
144 struct phys_block
*pb
= pr
->ph
;
148 if(pr_writable(vr
, pr
))
153 r
= pt_writemap(vmp
, &vmp
->vm_pt
, vr
->vaddr
+ pr
->offset
,
154 pb
->phys
, VM_PAGE_SIZE
, PTF_PRESENT
| PTF_USER
| rw
, WMF_VERIFY
);
157 printf("proc %d phys_region 0x%lx sanity check failed\n",
158 vmp
->vm_endpoint
, pr
->offset
);
165 /*===========================================================================*
167 *===========================================================================*/
168 void map_sanitycheck(const char *file
, int line
)
172 /* Macro for looping over all physical blocks of all regions of
175 #define ALLREGIONS(regioncode, physcode) \
176 for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) { \
178 region_iter v_iter; \
179 struct vir_region *vr; \
180 if(!(vmp->vm_flags & VMF_INUSE)) \
182 region_start_iter_least(&vmp->vm_regions_avl, &v_iter); \
183 while((vr = region_get_iter(&v_iter))) { \
184 struct phys_region *pr; \
186 for(voffset = 0; voffset < vr->length; \
187 voffset += VM_PAGE_SIZE) { \
188 if(!(pr = physblock_get(vr, voffset))) \
192 region_incr_iter(&v_iter); \
196 #define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
197 /* Basic pointers check. */
198 ALLREGIONS(MYSLABSANE(vr
),MYSLABSANE(pr
); MYSLABSANE(pr
->ph
);MYSLABSANE(pr
->parent
));
199 ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr
->parent
== vr
););
201 /* Do counting for consistency check. */
202 ALLREGIONS(;,USE(pr
->ph
, pr
->ph
->seencount
= 0;););
203 ALLREGIONS(;,MYASSERT(pr
->offset
== voffset
););
204 ALLREGIONS(;,USE(pr
->ph
, pr
->ph
->seencount
++;);
205 if(pr
->ph
->seencount
== 1) {
206 if(pr
->memtype
->ev_sanitycheck
)
207 pr
->memtype
->ev_sanitycheck(pr
, file
, line
);
211 /* Do consistency check. */
212 ALLREGIONS({ struct vir_region
*nextvr
= getnextvr(vr
);
214 MYASSERT(vr
->vaddr
< nextvr
->vaddr
);
215 MYASSERT(vr
->vaddr
+ vr
->length
<= nextvr
->vaddr
);
218 MYASSERT(!(vr
->vaddr
% VM_PAGE_SIZE
));,
219 if(pr
->ph
->flags
& PBF_INCACHE
) pr
->ph
->seencount
++;
220 if(pr
->ph
->refcount
!= pr
->ph
->seencount
) {
222 printf("ph in vr %p: 0x%lx refcount %u "
223 "but seencount %u\n",
225 pr
->ph
->refcount
, pr
->ph
->seencount
);
229 struct phys_region
*others
;
230 if(pr
->ph
->refcount
> 0) {
231 MYASSERT(pr
->ph
->firstregion
);
232 if(pr
->ph
->refcount
== 1) {
233 MYASSERT(pr
->ph
->firstregion
== pr
);
236 MYASSERT(!pr
->ph
->firstregion
);
238 for(others
= pr
->ph
->firstregion
; others
;
239 others
= others
->next_ph_list
) {
241 MYASSERT(others
->ph
== pr
->ph
);
244 if(pr
->ph
->flags
& PBF_INCACHE
) n_others
++;
245 MYASSERT(pr
->ph
->refcount
== n_others
);
247 MYASSERT(pr
->ph
->refcount
== pr
->ph
->seencount
);
248 MYASSERT(!(pr
->offset
% VM_PAGE_SIZE
)););
249 ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp
, vr
, pr
) == OK
));
254 /*=========================================================================*
256 *=========================================================================*/
257 int map_ph_writept(struct vmproc
*vmp
, struct vir_region
*vr
,
258 struct phys_region
*pr
)
260 int flags
= PTF_PRESENT
| PTF_USER
;
261 struct phys_block
*pb
= pr
->ph
;
267 assert(!(vr
->vaddr
% VM_PAGE_SIZE
));
268 assert(!(pr
->offset
% VM_PAGE_SIZE
));
269 assert(pb
->refcount
> 0);
271 if(pr_writable(vr
, pr
))
277 if(vr
->def_memtype
->pt_flags
)
278 flags
|= vr
->def_memtype
->pt_flags(vr
);
280 if(pt_writemap(vmp
, &vmp
->vm_pt
, vr
->vaddr
+ pr
->offset
,
281 pb
->phys
, VM_PAGE_SIZE
, flags
,
285 WMF_OVERWRITE
) != OK
) {
286 printf("VM: map_writept: pt_writemap failed\n");
291 USE(pr
, pr
->written
= 1;);
297 #define SLOT_FAIL ((vir_bytes) -1)
299 /*===========================================================================*
300 * region_find_slot_range *
301 *===========================================================================*/
302 static vir_bytes
region_find_slot_range(struct vmproc
*vmp
,
303 vir_bytes minv
, vir_bytes maxv
, vir_bytes length
)
305 struct vir_region
*lastregion
;
306 vir_bytes startv
= 0;
310 SANITYCHECK(SCL_FUNCTIONS
);
312 /* Length must be reasonable. */
315 /* Special case: allow caller to set maxv to 0 meaning 'I want
316 * it to be mapped in right here.'
319 maxv
= minv
+ length
;
323 printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
329 /* Basic input sanity checks. */
330 assert(!(length
% VM_PAGE_SIZE
));
332 printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
338 if(minv
+ length
> maxv
)
341 #define FREEVRANGE_TRY(rangestart, rangeend) { \
342 vir_bytes frstart = (rangestart), frend = (rangeend); \
343 frstart = MAX(frstart, minv); \
344 frend = MIN(frend, maxv); \
345 if(frend > frstart && (frend - frstart) >= length) { \
346 startv = frend-length; \
350 #define FREEVRANGE(start, end) { \
351 assert(!foundflag); \
352 FREEVRANGE_TRY(((start)+VM_PAGE_SIZE), ((end)-VM_PAGE_SIZE)); \
354 FREEVRANGE_TRY((start), (end)); \
358 /* find region after maxv. */
359 region_start_iter(&vmp
->vm_regions_avl
, &iter
, maxv
, AVL_GREATER_EQUAL
);
360 lastregion
= region_get_iter(&iter
);
363 /* This is the free virtual address space after the last region. */
364 region_start_iter(&vmp
->vm_regions_avl
, &iter
, maxv
, AVL_LESS
);
365 lastregion
= region_get_iter(&iter
);
366 FREEVRANGE(lastregion
?
367 lastregion
->vaddr
+lastregion
->length
: 0, VM_DATATOP
);
371 struct vir_region
*vr
;
372 while((vr
= region_get_iter(&iter
)) && !foundflag
) {
373 struct vir_region
*nextvr
;
374 region_decr_iter(&iter
);
375 nextvr
= region_get_iter(&iter
);
376 FREEVRANGE(nextvr
? nextvr
->vaddr
+nextvr
->length
: 0,
385 /* However we got it, startv must be in the requested range. */
386 assert(startv
>= minv
);
387 assert(startv
< maxv
);
388 assert(startv
+ length
<= maxv
);
390 /* remember this position as a hint for next time. */
391 vmp
->vm_region_top
= startv
+ length
;
396 /*===========================================================================*
398 *===========================================================================*/
399 static vir_bytes
region_find_slot(struct vmproc
*vmp
,
400 vir_bytes minv
, vir_bytes maxv
, vir_bytes length
)
402 vir_bytes v
, hint
= vmp
->vm_region_top
;
404 /* use the top of the last inserted region as a minv hint if
405 * possible. remember that a zero maxv is a special case.
408 if(maxv
&& hint
< maxv
&& hint
>= minv
) {
409 v
= region_find_slot_range(vmp
, minv
, hint
, length
);
415 return region_find_slot_range(vmp
, minv
, maxv
, length
);
418 static unsigned int phys_slot(vir_bytes len
)
420 assert(!(len
% VM_PAGE_SIZE
));
421 return len
/ VM_PAGE_SIZE
;
424 static struct vir_region
*region_new(struct vmproc
*vmp
, vir_bytes startv
, vir_bytes length
,
425 int flags
, mem_type_t
*memtype
)
427 struct vir_region
*newregion
;
428 struct phys_region
**newphysregions
;
430 int slots
= phys_slot(length
);
432 if(!(SLABALLOC(newregion
))) {
433 printf("vm: region_new: could not allocate\n");
437 /* Fill in node details. */
439 memset(newregion
, 0, sizeof(*newregion
));
440 newregion
->vaddr
= startv
;
441 newregion
->length
= length
;
442 newregion
->flags
= flags
;
443 newregion
->def_memtype
= memtype
;
444 newregion
->remaps
= 0;
445 newregion
->id
= id
++;
446 newregion
->lower
= newregion
->higher
= NULL
;
447 newregion
->parent
= vmp
;);
449 if(!(newphysregions
= calloc(slots
, sizeof(struct phys_region
*)))) {
450 printf("VM: region_new: allocating phys blocks failed\n");
455 USE(newregion
, newregion
->physblocks
= newphysregions
;);
460 /*===========================================================================*
462 *===========================================================================*/
463 struct vir_region
*map_page_region(struct vmproc
*vmp
, vir_bytes minv
,
464 vir_bytes maxv
, vir_bytes length
, u32_t flags
, int mapflags
,
467 struct vir_region
*newregion
;
470 assert(!(length
% VM_PAGE_SIZE
));
472 SANITYCHECK(SCL_FUNCTIONS
);
474 startv
= region_find_slot(vmp
, minv
, maxv
, length
);
475 if (startv
== SLOT_FAIL
)
478 /* Now we want a new region. */
479 if(!(newregion
= region_new(vmp
, startv
, length
, flags
, memtype
))) {
480 printf("VM: map_page_region: allocating region failed\n");
484 /* If a new event is specified, invoke it. */
485 if(newregion
->def_memtype
->ev_new
) {
486 if(newregion
->def_memtype
->ev_new(newregion
) != OK
) {
487 /* ev_new will have freed and removed the region */
492 if(mapflags
& MF_PREALLOC
) {
493 if(map_handle_memory(vmp
, newregion
, 0, length
, 1,
495 printf("VM: map_page_region: prealloc failed\n");
496 free(newregion
->physblocks
);
498 newregion
->physblocks
= NULL
;);
504 /* Pre-allocations should be uninitialized, but after that it's a
507 USE(newregion
, newregion
->flags
&= ~VR_UNINITIALIZED
;);
510 region_insert(&vmp
->vm_regions_avl
, newregion
);
513 assert(startv
== newregion
->vaddr
);
515 struct vir_region
*nextvr
;
516 if((nextvr
= getnextvr(newregion
))) {
517 assert(newregion
->vaddr
< nextvr
->vaddr
);
522 SANITYCHECK(SCL_FUNCTIONS
);
527 /*===========================================================================*
529 *===========================================================================*/
530 static int map_subfree(struct vir_region
*region
,
531 vir_bytes start
, vir_bytes len
)
533 struct phys_region
*pr
;
534 vir_bytes end
= start
+len
;
539 for(voffset
= 0; voffset
< phys_slot(region
->length
);
540 voffset
+= VM_PAGE_SIZE
) {
541 struct phys_region
*others
;
542 struct phys_block
*pb
;
544 if(!(pr
= physblock_get(region
, voffset
)))
549 for(others
= pb
->firstregion
; others
;
550 others
= others
->next_ph_list
) {
551 assert(others
->ph
== pb
);
556 for(voffset
= start
; voffset
< end
; voffset
+=VM_PAGE_SIZE
) {
557 if(!(pr
= physblock_get(region
, voffset
)))
559 assert(pr
->offset
>= start
);
560 assert(pr
->offset
< end
);
561 pb_unreferenced(region
, pr
, 1);
568 /*===========================================================================*
570 *===========================================================================*/
571 int map_free(struct vir_region
*region
)
575 if((r
=map_subfree(region
, 0, region
->length
)) != OK
) {
576 printf("%d\n", __LINE__
);
580 if(region
->def_memtype
->ev_delete
)
581 region
->def_memtype
->ev_delete(region
);
582 free(region
->physblocks
);
583 region
->physblocks
= NULL
;
589 /*========================================================================*
591 *========================================================================*/
592 int map_free_proc(struct vmproc
*vmp
)
594 struct vir_region
*r
;
596 while((r
= region_search_root(&vmp
->vm_regions_avl
))) {
597 SANITYCHECK(SCL_DETAIL
);
601 region_remove(&vmp
->vm_regions_avl
, r
->vaddr
); /* For sanity checks. */
606 SANITYCHECK(SCL_DETAIL
);
609 region_init(&vmp
->vm_regions_avl
);
611 SANITYCHECK(SCL_FUNCTIONS
);
616 /*===========================================================================*
618 *===========================================================================*/
619 struct vir_region
*map_lookup(struct vmproc
*vmp
,
620 vir_bytes offset
, struct phys_region
**physr
)
622 struct vir_region
*r
;
624 SANITYCHECK(SCL_FUNCTIONS
);
627 if(!region_search_root(&vmp
->vm_regions_avl
))
628 panic("process has no regions: %d", vmp
->vm_endpoint
);
631 if((r
= region_search(&vmp
->vm_regions_avl
, offset
, AVL_LESS_EQUAL
))) {
633 if(offset
>= r
->vaddr
&& offset
< r
->vaddr
+ r
->length
) {
634 ph
= offset
- r
->vaddr
;
636 *physr
= physblock_get(r
, ph
);
637 if(*physr
) assert((*physr
)->offset
== ph
);
643 SANITYCHECK(SCL_FUNCTIONS
);
648 u32_t
vrallocflags(u32_t flags
)
650 u32_t allocflags
= 0;
652 if(flags
& VR_PHYS64K
)
653 allocflags
|= PAF_ALIGN64K
;
654 if(flags
& VR_LOWER16MB
)
655 allocflags
|= PAF_LOWER16MB
;
656 if(flags
& VR_LOWER1MB
)
657 allocflags
|= PAF_LOWER1MB
;
658 if(!(flags
& VR_UNINITIALIZED
))
659 allocflags
|= PAF_CLEAR
;
664 /*===========================================================================*
666 *===========================================================================*/
667 int map_pf(struct vmproc
*vmp
,
668 struct vir_region
*region
,
671 vfs_callback_t pf_callback
,
676 struct phys_region
*ph
;
679 offset
-= offset
% VM_PAGE_SIZE
;
681 /* assert(offset >= 0); */ /* always true */
682 assert(offset
< region
->length
);
684 assert(!(region
->vaddr
% VM_PAGE_SIZE
));
685 assert(!(write
&& !(region
->flags
& VR_WRITABLE
)));
687 SANITYCHECK(SCL_FUNCTIONS
);
689 if(!(ph
= physblock_get(region
, offset
))) {
690 struct phys_block
*pb
;
694 if(!(pb
= pb_new(MAP_NONE
))) {
695 printf("map_pf: pb_new failed\n");
699 if(!(ph
= pb_reference(pb
, offset
, region
,
700 region
->def_memtype
))) {
701 printf("map_pf: pb_reference failed\n");
710 /* If we're writing and the block is already
711 * writable, nothing to do.
714 assert(ph
->memtype
->writable
);
716 if(!write
|| !ph
->memtype
->writable(ph
)) {
717 assert(ph
->memtype
->ev_pagefault
);
720 if((r
= ph
->memtype
->ev_pagefault(vmp
,
721 region
, ph
, write
, pf_callback
, state
, len
, io
)) == SUSPEND
) {
727 printf("map_pf: pagefault in %s failed\n", ph
->memtype
->name
);
730 pb_unreferenced(region
, ph
, 1);
736 assert(ph
->ph
->phys
!= MAP_NONE
);
740 assert(ph
->ph
->phys
!= MAP_NONE
);
742 if((r
= map_ph_writept(vmp
, region
, ph
)) != OK
) {
743 printf("map_pf: writept failed\n");
747 SANITYCHECK(SCL_FUNCTIONS
);
750 if(OK
!= pt_checkrange(&vmp
->vm_pt
, region
->vaddr
+offset
,
751 VM_PAGE_SIZE
, write
)) {
752 panic("map_pf: pt_checkrange failed: %d", r
);
759 int map_handle_memory(struct vmproc
*vmp
,
760 struct vir_region
*region
, vir_bytes start_offset
, vir_bytes length
,
761 int write
, vfs_callback_t cb
, void *state
, int statelen
)
763 vir_bytes offset
, lim
;
768 lim
= start_offset
+ length
;
769 assert(lim
> start_offset
);
771 for(offset
= start_offset
; offset
< lim
; offset
+= VM_PAGE_SIZE
)
772 if((r
= map_pf(vmp
, region
, offset
, write
,
773 cb
, state
, statelen
, &io
)) != OK
)
779 /*===========================================================================*
781 *===========================================================================*/
782 int map_pin_memory(struct vmproc
*vmp
)
784 struct vir_region
*vr
;
787 region_start_iter_least(&vmp
->vm_regions_avl
, &iter
);
788 /* Scan all memory regions. */
789 while((vr
= region_get_iter(&iter
))) {
790 /* Make sure region is mapped to physical memory and writable.*/
791 r
= map_handle_memory(vmp
, vr
, 0, vr
->length
, 1, NULL
, 0, 0);
793 panic("map_pin_memory: map_handle_memory failed: %d", r
);
795 region_incr_iter(&iter
);
800 /*===========================================================================*
802 *===========================================================================*/
803 struct vir_region
*map_copy_region(struct vmproc
*vmp
, struct vir_region
*vr
)
805 /* map_copy_region creates a complete copy of the vir_region
806 * data structure, linking in the same phys_blocks directly,
807 * but all in limbo, i.e., the caller has to link the vir_region
808 * to a process. Therefore it doesn't increase the refcount in
809 * the phys_block; the caller has to do this once it's linked.
810 * The reason for this is to keep the sanity checks working
811 * within this function.
813 struct vir_region
*newvr
;
814 struct phys_region
*ph
;
818 cr
= physregions(vr
);
822 if(!(newvr
= region_new(vr
->parent
, vr
->vaddr
, vr
->length
, vr
->flags
, vr
->def_memtype
)))
825 USE(newvr
, newvr
->parent
= vmp
;);
827 if(vr
->def_memtype
->ev_copy
&& (r
=vr
->def_memtype
->ev_copy(vr
, newvr
)) != OK
) {
829 printf("VM: memtype-specific copy failed (%d)\n", r
);
833 for(p
= 0; p
< phys_slot(vr
->length
); p
++) {
834 struct phys_region
*newph
;
836 if(!(ph
= physblock_get(vr
, p
*VM_PAGE_SIZE
))) continue;
837 newph
= pb_reference(ph
->ph
, ph
->offset
, newvr
,
840 if(!newph
) { map_free(newvr
); return NULL
; }
842 if(ph
->memtype
->ev_reference
)
843 ph
->memtype
->ev_reference(ph
, newph
);
846 USE(newph
, newph
->written
= 0;);
847 assert(physregions(vr
) == cr
);
852 assert(physregions(vr
) == physregions(newvr
));
858 /*===========================================================================*
860 *===========================================================================*/
861 int copy_abs2region(phys_bytes absaddr
, struct vir_region
*destregion
,
862 phys_bytes offset
, phys_bytes len
)
866 assert(destregion
->physblocks
);
868 phys_bytes sublen
, suboffset
;
869 struct phys_region
*ph
;
871 assert(destregion
->physblocks
);
872 if(!(ph
= physblock_get(destregion
, offset
))) {
873 printf("VM: copy_abs2region: no phys region found (1).\n");
876 assert(ph
->offset
<= offset
);
877 if(ph
->offset
+VM_PAGE_SIZE
<= offset
) {
878 printf("VM: copy_abs2region: no phys region found (2).\n");
881 suboffset
= offset
- ph
->offset
;
882 assert(suboffset
< VM_PAGE_SIZE
);
884 if(sublen
> VM_PAGE_SIZE
- suboffset
)
885 sublen
= VM_PAGE_SIZE
- suboffset
;
886 assert(suboffset
+ sublen
<= VM_PAGE_SIZE
);
887 if(ph
->ph
->refcount
!= 1) {
888 printf("VM: copy_abs2region: refcount not 1.\n");
892 if(sys_abscopy(absaddr
, ph
->ph
->phys
+ suboffset
, sublen
) != OK
) {
893 printf("VM: copy_abs2region: abscopy failed.\n");
904 /*=========================================================================*
906 *=========================================================================*/
907 int map_writept(struct vmproc
*vmp
)
909 struct vir_region
*vr
;
910 struct phys_region
*ph
;
913 region_start_iter_least(&vmp
->vm_regions_avl
, &v_iter
);
915 while((vr
= region_get_iter(&v_iter
))) {
917 for(p
= 0; p
< vr
->length
; p
+= VM_PAGE_SIZE
) {
918 if(!(ph
= physblock_get(vr
, p
))) continue;
920 if((r
=map_ph_writept(vmp
, vr
, ph
)) != OK
) {
921 printf("VM: map_writept: failed\n");
925 region_incr_iter(&v_iter
);
931 /*========================================================================*
933 *========================================================================*/
934 int map_proc_copy(struct vmproc
*dst
, struct vmproc
*src
)
936 /* Copy all the memory regions from the src process to the dst process. */
937 region_init(&dst
->vm_regions_avl
);
939 return map_proc_copy_from(dst
, src
, NULL
);
942 /*========================================================================*
943 * map_proc_copy_from *
944 *========================================================================*/
945 int map_proc_copy_from(struct vmproc
*dst
, struct vmproc
*src
,
946 struct vir_region
*start_src_vr
)
948 struct vir_region
*vr
;
952 start_src_vr
= region_search_least(&src
->vm_regions_avl
);
954 assert(start_src_vr
);
955 assert(start_src_vr
->parent
== src
);
956 region_start_iter(&src
->vm_regions_avl
, &v_iter
,
957 start_src_vr
->vaddr
, AVL_EQUAL
);
958 assert(region_get_iter(&v_iter
) == start_src_vr
);
960 /* Copy source regions after the destination's last region (if any). */
962 SANITYCHECK(SCL_FUNCTIONS
);
964 while((vr
= region_get_iter(&v_iter
))) {
965 struct vir_region
*newvr
;
966 if(!(newvr
= map_copy_region(dst
, vr
))) {
970 region_insert(&dst
->vm_regions_avl
, newvr
);
971 assert(vr
->length
== newvr
->length
);
976 struct phys_region
*orig_ph
, *new_ph
;
977 assert(vr
->physblocks
!= newvr
->physblocks
);
978 for(vaddr
= 0; vaddr
< vr
->length
; vaddr
+= VM_PAGE_SIZE
) {
979 orig_ph
= physblock_get(vr
, vaddr
);
980 new_ph
= physblock_get(newvr
, vaddr
);
981 if(!orig_ph
) { assert(!new_ph
); continue;}
983 assert(orig_ph
!= new_ph
);
984 assert(orig_ph
->ph
== new_ph
->ph
);
988 region_incr_iter(&v_iter
);
994 SANITYCHECK(SCL_FUNCTIONS
);
998 int map_region_extend_upto_v(struct vmproc
*vmp
, vir_bytes v
)
1000 vir_bytes offset
= v
, limit
, extralen
;
1001 struct vir_region
*vr
, *nextvr
;
1002 struct phys_region
**newpr
;
1003 int newslots
, prevslots
, addedslots
, r
;
1005 offset
= roundup(offset
, VM_PAGE_SIZE
);
1007 if(!(vr
= region_search(&vmp
->vm_regions_avl
, offset
, AVL_LESS
))) {
1008 printf("VM: nothing to extend\n");
1012 if(vr
->vaddr
+ vr
->length
>= v
) return OK
;
1014 limit
= vr
->vaddr
+ vr
->length
;
1016 assert(vr
->vaddr
<= offset
);
1017 newslots
= phys_slot(offset
- vr
->vaddr
);
1018 prevslots
= phys_slot(vr
->length
);
1019 assert(newslots
>= prevslots
);
1020 addedslots
= newslots
- prevslots
;
1021 extralen
= offset
- limit
;
1022 assert(extralen
> 0);
1024 if((nextvr
= getnextvr(vr
))) {
1025 assert(offset
<= nextvr
->vaddr
);
1028 if(nextvr
&& nextvr
->vaddr
< offset
) {
1029 printf("VM: can't grow into next region\n");
1033 if(!vr
->def_memtype
->ev_resize
) {
1034 if(!map_page_region(vmp
, limit
, 0, extralen
,
1035 VR_WRITABLE
| VR_ANON
,
1036 0, &mem_type_anon
)) {
1037 printf("resize: couldn't put anon memory there\n");
1043 if(!(newpr
= realloc(vr
->physblocks
,
1044 newslots
* sizeof(struct phys_region
*)))) {
1045 printf("VM: map_region_extend_upto_v: realloc failed\n");
1049 vr
->physblocks
= newpr
;
1050 memset(vr
->physblocks
+ prevslots
, 0,
1051 addedslots
* sizeof(struct phys_region
*));
1053 r
= vr
->def_memtype
->ev_resize(vmp
, vr
, offset
- vr
->vaddr
);
1058 /*========================================================================*
1059 * map_unmap_region *
1060 *========================================================================*/
1061 int map_unmap_region(struct vmproc
*vmp
, struct vir_region
*r
,
1062 vir_bytes offset
, vir_bytes len
)
1064 /* Shrink the region by 'len' bytes, from the start. Unreference
1065 * memory it used to reference if any.
1067 vir_bytes regionstart
;
1068 int freeslots
= phys_slot(len
);
1070 SANITYCHECK(SCL_FUNCTIONS
);
1072 if(offset
+len
> r
->length
|| (len
% VM_PAGE_SIZE
)) {
1073 printf("VM: bogus length 0x%lx\n", len
);
1077 regionstart
= r
->vaddr
+ offset
;
1079 /* unreference its memory */
1080 map_subfree(r
, offset
, len
);
1082 /* if unmap was at start/end of this region, it actually shrinks */
1083 if(r
->length
== len
) {
1084 /* Whole region disappears. Unlink and free it. */
1085 region_remove(&vmp
->vm_regions_avl
, r
->vaddr
);
1087 } else if(offset
== 0) {
1088 struct phys_region
*pr
;
1092 if(!r
->def_memtype
->ev_lowshrink
) {
1093 printf("VM: low-shrinking not implemented for %s\n",
1094 r
->def_memtype
->name
);
1098 if(r
->def_memtype
->ev_lowshrink(r
, len
) != OK
) {
1099 printf("VM: low-shrinking failed for %s\n",
1100 r
->def_memtype
->name
);
1104 region_remove(&vmp
->vm_regions_avl
, r
->vaddr
);
1109 remslots
= phys_slot(r
->length
);
1111 region_insert(&vmp
->vm_regions_avl
, r
);
1113 /* vaddr has increased; to make all the phys_regions
1114 * point to the same addresses, make them shrink by the
1117 for(voffset
= len
; voffset
< r
->length
;
1118 voffset
+= VM_PAGE_SIZE
) {
1119 if(!(pr
= physblock_get(r
, voffset
))) continue;
1120 assert(pr
->offset
>= offset
);
1121 assert(pr
->offset
>= len
);
1122 USE(pr
, pr
->offset
-= len
;);
1125 memmove(r
->physblocks
, r
->physblocks
+ freeslots
,
1126 remslots
* sizeof(struct phys_region
*));
1127 USE(r
, r
->length
-= len
;);
1128 } else if(offset
+ len
== r
->length
) {
1129 assert(len
<= r
->length
);
1133 SANITYCHECK(SCL_DETAIL
);
1135 if(pt_writemap(vmp
, &vmp
->vm_pt
, regionstart
,
1136 MAP_NONE
, len
, 0, WMF_OVERWRITE
) != OK
) {
1137 printf("VM: map_unmap_region: pt_writemap failed\n");
1141 SANITYCHECK(SCL_FUNCTIONS
);
1146 static int split_region(struct vmproc
*vmp
, struct vir_region
*vr
,
1147 struct vir_region
**vr1
, struct vir_region
**vr2
, vir_bytes split_len
)
1149 struct vir_region
*r1
= NULL
, *r2
= NULL
;
1150 vir_bytes rem_len
= vr
->length
- split_len
;
1155 assert(!(split_len
% VM_PAGE_SIZE
));
1156 assert(!(rem_len
% VM_PAGE_SIZE
));
1157 assert(!(vr
->vaddr
% VM_PAGE_SIZE
));
1158 assert(!(vr
->length
% VM_PAGE_SIZE
));
1160 if(!vr
->def_memtype
->ev_split
) {
1161 printf("VM: split region not implemented for %s\n",
1162 vr
->def_memtype
->name
);
1166 slots1
= phys_slot(split_len
);
1167 slots2
= phys_slot(rem_len
);
1169 if(!(r1
= region_new(vmp
, vr
->vaddr
, split_len
, vr
->flags
,
1170 vr
->def_memtype
))) {
1174 if(!(r2
= region_new(vmp
, vr
->vaddr
+split_len
, rem_len
, vr
->flags
,
1175 vr
->def_memtype
))) {
1180 for(voffset
= 0; voffset
< r1
->length
; voffset
+= VM_PAGE_SIZE
) {
1181 struct phys_region
*ph
, *phn
;
1182 if(!(ph
= physblock_get(vr
, voffset
))) continue;
1183 if(!(phn
= pb_reference(ph
->ph
, voffset
, r1
, ph
->memtype
)))
1188 for(voffset
= 0; voffset
< r2
->length
; voffset
+= VM_PAGE_SIZE
) {
1189 struct phys_region
*ph
, *phn
;
1190 if(!(ph
= physblock_get(vr
, split_len
+ voffset
))) continue;
1191 if(!(phn
= pb_reference(ph
->ph
, voffset
, r2
, ph
->memtype
)))
1196 vr
->def_memtype
->ev_split(vmp
, vr
, r1
, r2
);
1198 region_remove(&vmp
->vm_regions_avl
, vr
->vaddr
);
1200 region_insert(&vmp
->vm_regions_avl
, r1
);
1201 region_insert(&vmp
->vm_regions_avl
, r2
);
1209 if(r1
) map_free(r1
);
1210 if(r2
) map_free(r2
);
1212 printf("split_region: failed\n");
1217 int map_unmap_range(struct vmproc
*vmp
, vir_bytes unmap_start
, vir_bytes length
)
1219 vir_bytes o
= unmap_start
% VM_PAGE_SIZE
, unmap_limit
;
1221 struct vir_region
*vr
, *nextvr
;
1225 length
= roundup(length
, VM_PAGE_SIZE
);
1226 unmap_limit
= length
+ unmap_start
;
1228 if(length
< VM_PAGE_SIZE
) return EINVAL
;
1229 if(unmap_limit
<= unmap_start
) return EINVAL
;
1231 region_start_iter(&vmp
->vm_regions_avl
, &v_iter
, unmap_start
, AVL_LESS_EQUAL
);
1233 if(!(vr
= region_get_iter(&v_iter
))) {
1234 region_start_iter(&vmp
->vm_regions_avl
, &v_iter
, unmap_start
, AVL_GREATER
);
1235 if(!(vr
= region_get_iter(&v_iter
))) {
1242 for(; vr
&& vr
->vaddr
< unmap_limit
; vr
= nextvr
) {
1243 vir_bytes thislimit
= vr
->vaddr
+ vr
->length
;
1244 vir_bytes this_unmap_start
, this_unmap_limit
;
1245 vir_bytes remainlen
;
1248 region_incr_iter(&v_iter
);
1249 nextvr
= region_get_iter(&v_iter
);
1251 assert(thislimit
> vr
->vaddr
);
1253 this_unmap_start
= MAX(unmap_start
, vr
->vaddr
);
1254 this_unmap_limit
= MIN(unmap_limit
, thislimit
);
1256 if(this_unmap_start
>= this_unmap_limit
) continue;
1258 if(this_unmap_start
> vr
->vaddr
&& this_unmap_limit
< thislimit
) {
1259 struct vir_region
*vr1
, *vr2
;
1260 vir_bytes split_len
= this_unmap_limit
- vr
->vaddr
;
1261 assert(split_len
> 0);
1262 assert(split_len
< vr
->length
);
1263 if((r
=split_region(vmp
, vr
, &vr1
, &vr2
, split_len
)) != OK
) {
1264 printf("VM: unmap split failed\n");
1268 thislimit
= vr
->vaddr
+ vr
->length
;
1271 remainlen
= this_unmap_limit
- vr
->vaddr
;
1273 assert(this_unmap_start
>= vr
->vaddr
);
1274 assert(this_unmap_limit
<= thislimit
);
1275 assert(remainlen
> 0);
1277 r
= map_unmap_region(vmp
, vr
, this_unmap_start
- vr
->vaddr
,
1278 this_unmap_limit
- this_unmap_start
);
1281 printf("map_unmap_range: map_unmap_region failed\n");
1285 region_start_iter(&vmp
->vm_regions_avl
, &v_iter
, nextvr
->vaddr
, AVL_EQUAL
);
1286 assert(region_get_iter(&v_iter
) == nextvr
);
1293 /*========================================================================*
1295 *========================================================================*/
1296 int map_get_phys(struct vmproc
*vmp
, vir_bytes addr
, phys_bytes
*r
)
1298 struct vir_region
*vr
;
1300 if (!(vr
= map_lookup(vmp
, addr
, NULL
)) ||
1301 (vr
->vaddr
!= addr
))
1304 if (!vr
->def_memtype
->regionid
)
1308 *r
= vr
->def_memtype
->regionid(vr
);
1313 /*========================================================================*
1315 *========================================================================*/
1316 int map_get_ref(struct vmproc
*vmp
, vir_bytes addr
, u8_t
*cnt
)
1318 struct vir_region
*vr
;
1320 if (!(vr
= map_lookup(vmp
, addr
, NULL
)) ||
1321 (vr
->vaddr
!= addr
) || !vr
->def_memtype
->refcount
)
1325 *cnt
= vr
->def_memtype
->refcount(vr
);
1330 void get_usage_info_kernel(struct vm_usage_info
*vui
)
1332 memset(vui
, 0, sizeof(*vui
));
1333 vui
->vui_total
= kernel_boot_info
.kernel_allocated_bytes
+
1334 kernel_boot_info
.kernel_allocated_bytes_dynamic
;
1337 static void get_usage_info_vm(struct vm_usage_info
*vui
)
1339 memset(vui
, 0, sizeof(*vui
));
1340 vui
->vui_total
= kernel_boot_info
.vm_allocated_bytes
+
1341 get_vm_self_pages() * VM_PAGE_SIZE
;
1344 /*========================================================================*
1346 *========================================================================*/
1347 void get_usage_info(struct vmproc
*vmp
, struct vm_usage_info
*vui
)
1349 struct vir_region
*vr
;
1350 struct phys_region
*ph
;
1352 region_start_iter_least(&vmp
->vm_regions_avl
, &v_iter
);
1355 memset(vui
, 0, sizeof(*vui
));
1357 if(vmp
->vm_endpoint
== VM_PROC_NR
) {
1358 get_usage_info_vm(vui
);
1362 if(vmp
->vm_endpoint
< 0) {
1363 get_usage_info_kernel(vui
);
1367 while((vr
= region_get_iter(&v_iter
))) {
1368 for(voffset
= 0; voffset
< vr
->length
; voffset
+= VM_PAGE_SIZE
) {
1369 if(!(ph
= physblock_get(vr
, voffset
))) continue;
1370 /* All present pages are counted towards the total. */
1371 vui
->vui_total
+= VM_PAGE_SIZE
;
1373 if (ph
->ph
->refcount
> 1) {
1374 /* Any page with a refcount > 1 is common. */
1375 vui
->vui_common
+= VM_PAGE_SIZE
;
1377 /* Any common, non-COW page is shared. */
1378 if (vr
->flags
& VR_SHARED
)
1379 vui
->vui_shared
+= VM_PAGE_SIZE
;
1382 region_incr_iter(&v_iter
);
1386 /*===========================================================================*
1388 *===========================================================================*/
1389 int get_region_info(struct vmproc
*vmp
, struct vm_region_info
*vri
,
1390 int max
, vir_bytes
*nextp
)
1392 struct vir_region
*vr
;
1401 region_start_iter(&vmp
->vm_regions_avl
, &v_iter
, next
, AVL_GREATER_EQUAL
);
1402 if(!(vr
= region_get_iter(&v_iter
))) return 0;
1404 for(count
= 0; (vr
= region_get_iter(&v_iter
)) && count
< max
;
1405 region_incr_iter(&v_iter
)) {
1406 struct phys_region
*ph1
= NULL
, *ph2
= NULL
;
1409 /* where to start on next iteration, regardless of what we find now */
1410 next
= vr
->vaddr
+ vr
->length
;
1412 /* Report part of the region that's actually in use. */
1414 /* Get first and last phys_regions, if any */
1415 for(voffset
= 0; voffset
< vr
->length
; voffset
+= VM_PAGE_SIZE
) {
1416 struct phys_region
*ph
;
1417 if(!(ph
= physblock_get(vr
, voffset
))) continue;
1423 printf("skipping empty region 0x%lx-0x%lx\n",
1424 vr
->vaddr
, vr
->vaddr
+vr
->length
);
1428 /* Report start+length of region starting from lowest use. */
1429 vri
->vri_addr
= vr
->vaddr
+ ph1
->offset
;
1430 vri
->vri_prot
= PROT_READ
;
1431 vri
->vri_length
= ph2
->offset
+ VM_PAGE_SIZE
- ph1
->offset
;
1433 /* "AND" the provided protection with per-page protection. */
1434 if (vr
->flags
& VR_WRITABLE
)
1435 vri
->vri_prot
|= PROT_WRITE
;
1444 /*========================================================================*
1445 * regionprintstats *
1446 *========================================================================*/
1447 void printregionstats(struct vmproc
*vmp
)
1449 struct vir_region
*vr
;
1450 struct phys_region
*pr
;
1451 vir_bytes used
= 0, weighted
= 0;
1453 region_start_iter_least(&vmp
->vm_regions_avl
, &v_iter
);
1455 while((vr
= region_get_iter(&v_iter
))) {
1457 region_incr_iter(&v_iter
);
1458 if(vr
->flags
& VR_DIRECT
)
1460 for(voffset
= 0; voffset
< vr
->length
; voffset
+=VM_PAGE_SIZE
) {
1461 if(!(pr
= physblock_get(vr
, voffset
))) continue;
1462 used
+= VM_PAGE_SIZE
;
1463 weighted
+= VM_PAGE_SIZE
/ pr
->ph
->refcount
;
1467 printf("%6lukB %6lukB\n", used
/1024, weighted
/1024);
1472 void map_setparent(struct vmproc
*vmp
)
1475 struct vir_region
*vr
;
1476 region_start_iter_least(&vmp
->vm_regions_avl
, &iter
);
1477 while((vr
= region_get_iter(&iter
))) {
1478 USE(vr
, vr
->parent
= vmp
;);
1479 region_incr_iter(&iter
);
1483 unsigned int physregions(struct vir_region
*vr
)
1487 for(voffset
= 0; voffset
< vr
->length
; voffset
+= VM_PAGE_SIZE
) {
1488 if(physblock_get(vr
, voffset
))