/* This file contains some utility routines for VM. */

#define brk _brk	/* get rid of no previous prototype warning */

#include <minix/callnr.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/endpoint.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/bitmap.h>

#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <assert.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/resource.h>

#include "proto.h"
#include "glo.h"
#include "util.h"
#include "region.h"
#include "sanitycheck.h"

#include <machine/archtypes.h>
#include "kernel/const.h"
#include "kernel/config.h"
#include "kernel/type.h"
#include "kernel/proc.h"
/*===========================================================================*
 *				get_mem_chunks				     *
 *===========================================================================*/
void get_mem_chunks(
struct memory *mem_chunks)			/* store mem chunks here */
{
/* Initialize the free memory list from the kernel-provided memory map.
 * Translate the byte offsets and sizes in this list to clicks, properly
 * truncated.
 */
  phys_bytes base, size, limit;
  int i;
  struct memory *memp;

  /* Initialize everything to zero. */
  memset(mem_chunks, 0, NR_MEMS*sizeof(*mem_chunks));

  /* Obtain and parse memory from kernel environment. */
  /* XXX Any memory chunk in excess of NR_MEMS is silently ignored. */
  for(i = 0; i < MIN(MAXMEMMAP, NR_MEMS); i++) {
	mem_chunks[i].base = kernel_boot_info.memmap[i].mm_base_addr;
	mem_chunks[i].size = kernel_boot_info.memmap[i].mm_length;
  }

  /* Round physical memory to clicks. Round start up, round end down. */
  for (i = 0; i < NR_MEMS; i++) {
	memp = &mem_chunks[i];		/* next mem chunk is stored here */
	base = mem_chunks[i].base;
	size = mem_chunks[i].size;
	limit = base + size;
	base = (phys_bytes) (CLICK_CEIL(base));
	limit = (phys_bytes) (CLICK_FLOOR(limit));
	if (limit <= base) {
		memp->base = memp->size = 0;
	} else {
		memp->base = base >> CLICK_SHIFT;
		memp->size = (limit - base) >> CLICK_SHIFT;
	}
  }
}
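
/* Worked example of the rounding above (illustrative only, assuming 4 KiB
 * clicks, i.e. CLICK_SHIFT == 12; the chunk values are hypothetical):
 */
#if 0
	phys_bytes b = 0x10234, s = 0x5000, l;
	l = b + s;		/* l == 0x15234 */
	b = CLICK_CEIL(b);	/* start rounds up:  0x11000 */
	l = CLICK_FLOOR(l);	/* end rounds down:  0x15000 */
	/* stored: base == 0x11000 >> 12 == 0x11 clicks,
	 *         size == (0x15000 - 0x11000) >> 12 == 4 clicks
	 */
#endif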
/*===========================================================================*
 *				vm_isokendpt				     *
 *===========================================================================*/
int vm_isokendpt(endpoint_t endpoint, int *procn)
{
	*procn = _ENDPOINT_P(endpoint);
	if(*procn < 0 || *procn >= NR_PROCS)
		return EINVAL;
	if(*procn >= 0 && endpoint != vmproc[*procn].vm_endpoint)
		return EDEADEPT;
	if(*procn >= 0 && !(vmproc[*procn].vm_flags & VMF_INUSE))
		return EDEADEPT;
	return OK;
}
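
/* Typical caller pattern (a hypothetical helper, shown only to illustrate
 * how the slot number returned in *procn indexes vmproc[]); not compiled:
 */
#if 0
static struct vmproc *lookup_vmp(endpoint_t ep)
{
	int slot;

	if (vm_isokendpt(ep, &slot) != OK)
		return NULL;	/* invalid, stale or unused endpoint */
	return &vmproc[slot];
}
#endif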
/*===========================================================================*
 *				do_info					     *
 *===========================================================================*/
int do_info(message *m)
{
	struct vm_stats_info vsi;
	struct vm_usage_info vui;
	static struct vm_region_info vri[MAX_VRI_COUNT];
	struct vmproc *vmp;
	vir_bytes addr, size, next, ptr;
	int r, pr, dummy, count, free_pages, largest_contig;

	if (vm_isokendpt(m->m_source, &pr) != OK)
		return EINVAL;
	vmp = &vmproc[pr];

	ptr = (vir_bytes) m->m_lsys_vm_info.ptr;

	switch(m->m_lsys_vm_info.what) {
	case VMIW_STATS:
		vsi.vsi_pagesize = VM_PAGE_SIZE;
		vsi.vsi_total = total_pages;
		memstats(&dummy, &free_pages, &largest_contig);
		vsi.vsi_free = free_pages;
		vsi.vsi_largest = largest_contig;

		get_stats_info(&vsi);

		addr = (vir_bytes) &vsi;
		size = sizeof(vsi);

		break;

	case VMIW_USAGE:
		if(m->m_lsys_vm_info.ep < 0)
			get_usage_info_kernel(&vui);
		else if (vm_isokendpt(m->m_lsys_vm_info.ep, &pr) != OK)
			return EINVAL;
		else get_usage_info(&vmproc[pr], &vui);

		addr = (vir_bytes) &vui;
		size = sizeof(vui);

		break;

	case VMIW_REGION:
		if (vm_isokendpt(m->m_lsys_vm_info.ep, &pr) != OK)
			return EINVAL;

		count = MIN(m->m_lsys_vm_info.count, MAX_VRI_COUNT);
		next = m->m_lsys_vm_info.next;

		count = get_region_info(&vmproc[pr], vri, count, &next);

		m->m_lsys_vm_info.count = count;
		m->m_lsys_vm_info.next = next;

		addr = (vir_bytes) vri;
		size = sizeof(vri[0]) * count;

		break;

	default:
		return EINVAL;
	}

	if (size == 0)
		return OK;

	/* Make sure that no page faults can occur while copying out. A page
	 * fault would cause the kernel to send a notify to us, while we would
	 * be waiting for the result of the copy system call, resulting in a
	 * deadlock. Note that no memory mapping can be undone without the
	 * involvement of VM, so we are safe until we're done.
	 */
	r = handle_memory_once(vmp, ptr, size, 1 /*wrflag*/);
	if (r != OK) return r;

	/* Now that we know the copy out will succeed, perform the actual copy
	 * operation.
	 */
	return sys_datacopy(SELF, addr,
		(vir_bytes) vmp->vm_endpoint, ptr, size);
}
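
/* Caller-side sketch of a VMIW_USAGE query (hypothetical framing; callers
 * normally go through the libsys wrappers rather than building the message
 * by hand); not compiled:
 */
#if 0
	message m;
	struct vm_usage_info vui;

	memset(&m, 0, sizeof(m));
	m.m_lsys_vm_info.what = VMIW_USAGE;
	m.m_lsys_vm_info.ep = target_ep;	/* < 0 selects kernel usage */
	m.m_lsys_vm_info.ptr = &vui;		/* VM copies the result here */
	/* send to VM_PROC_NR; on OK, vui holds the usage numbers */
#endif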
/*===========================================================================*
 *				swap_proc_slot				     *
 *===========================================================================*/
int swap_proc_slot(struct vmproc *src_vmp, struct vmproc *dst_vmp)
{
	struct vmproc orig_src_vmproc, orig_dst_vmproc;

#if LU_DEBUG
	printf("VM: swap_proc: swapping %d (%d) and %d (%d)\n",
	    src_vmp->vm_endpoint, src_vmp->vm_slot,
	    dst_vmp->vm_endpoint, dst_vmp->vm_slot);
#endif

	/* Save existing data. */
	orig_src_vmproc = *src_vmp;
	orig_dst_vmproc = *dst_vmp;

	/* Swap slots. */
	*src_vmp = orig_dst_vmproc;
	*dst_vmp = orig_src_vmproc;

	/* Preserve endpoints and slot numbers. */
	src_vmp->vm_endpoint = orig_src_vmproc.vm_endpoint;
	src_vmp->vm_slot = orig_src_vmproc.vm_slot;
	dst_vmp->vm_endpoint = orig_dst_vmproc.vm_endpoint;
	dst_vmp->vm_slot = orig_dst_vmproc.vm_slot;

#if LU_DEBUG
	printf("VM: swap_proc: swapped %d (%d) and %d (%d)\n",
	    src_vmp->vm_endpoint, src_vmp->vm_slot,
	    dst_vmp->vm_endpoint, dst_vmp->vm_slot);
#endif

	return OK;
}
/*===========================================================================*
 *			      swap_proc_dyn_data			     *
 *===========================================================================*/
int swap_proc_dyn_data(struct vmproc *src_vmp, struct vmproc *dst_vmp)
{
	int is_vm;
	int r;

	is_vm = (dst_vmp->vm_endpoint == VM_PROC_NR);

	/* For VM, transfer memory regions above the stack first. */
	if(is_vm) {
#if LU_DEBUG
		printf("VM: swap_proc_dyn_data: transferring regions above the stack from old VM (%d) to new VM (%d)\n",
			src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
#endif
		r = pt_map_in_range(src_vmp, dst_vmp, VM_STACKTOP, 0);
		if(r != OK) {
			printf("swap_proc_dyn_data: pt_map_in_range failed\n");
			return r;
		}
	}

#if LU_DEBUG
	printf("VM: swap_proc_dyn_data: swapping regions' parents for %d (%d) and %d (%d)\n",
	    src_vmp->vm_endpoint, src_vmp->vm_slot,
	    dst_vmp->vm_endpoint, dst_vmp->vm_slot);
#endif

	/* Swap vir_regions' parents. */
	map_setparent(src_vmp);
	map_setparent(dst_vmp);

	/* For regular processes, transfer regions above the stack now.
	 * In case of rollback, we need to skip this step. To sandbox the
	 * new instance and prevent state corruption on rollback, we share all
	 * the regions between the two instances as COW.
	 */
	if(!is_vm) {
		struct vir_region *vr;
		vr = map_lookup(dst_vmp, VM_STACKTOP, NULL);
		if(vr && !map_lookup(src_vmp, VM_STACKTOP, NULL)) {
#if LU_DEBUG
			printf("VM: swap_proc_dyn_data: transferring regions above the stack from %d to %d\n",
				src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
#endif
			r = map_proc_copy_from(src_vmp, dst_vmp, vr);
			if(r != OK) {
				return r;
			}
		}
	}

	return OK;
}
void *mmap(void *addr, size_t len, int f, int f2, int f3, off_t o)
{
	void *ret;
	phys_bytes p;

	assert(!addr);
	assert(!(len % VM_PAGE_SIZE));

	ret = vm_allocpages(&p, VMP_SLAB, len/VM_PAGE_SIZE);

	if(!ret) return MAP_FAILED;
	memset(ret, 0, len);
	return ret;
}

int munmap(void *addr, size_t len)
{
	vm_freepages((vir_bytes) addr, roundup(len, VM_PAGE_SIZE)/VM_PAGE_SIZE);
	return 0;
}
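
/* Sketch of how this internal mmap()/munmap() pair is meant to be used from
 * within VM itself (the protection, flags and offset arguments are ignored
 * by the implementation above); illustrative only, not compiled:
 */
#if 0
	void *buf = mmap(NULL, 4 * VM_PAGE_SIZE, 0, 0, 0, 0);
	if (buf != MAP_FAILED) {
		/* ... use the zero-filled, page-aligned buffer ... */
		munmap(buf, 4 * VM_PAGE_SIZE);
	}
#endif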
int brk(void *addr)
{
	/* brk is a special case function to allow vm itself to
	   allocate memory in its own (cacheable) HEAP */
	vir_bytes target = roundup((vir_bytes)addr, VM_PAGE_SIZE), v;
	extern char _end;
	extern char *_brksize;
	static vir_bytes prevbrk = (vir_bytes) &_end;
	struct vmproc *vmprocess = &vmproc[VM_PROC_NR];

	for(v = roundup(prevbrk, VM_PAGE_SIZE); v < target;
		v += VM_PAGE_SIZE) {
		phys_bytes mem, newpage = alloc_mem(1, 0);
		if(newpage == NO_MEM) return -1;
		mem = CLICK2ABS(newpage);
		if(pt_writemap(vmprocess, &vmprocess->vm_pt,
			v, mem, VM_PAGE_SIZE,
			  ARCH_VM_PTE_PRESENT
			| ARCH_VM_PTE_USER
			| ARCH_VM_PTE_RW
#if defined(__arm__)
			| ARM_VM_PTE_CACHED
#endif
			, 0) != OK) {
			free_mem(newpage, 1);
			return -1;
		}
		prevbrk = v + VM_PAGE_SIZE;
	}

	_brksize = (char *) addr;

	if(sys_vmctl(SELF, VMCTL_FLUSHTLB, 0) != OK)
		panic("flushtlb failed");

	return 0;
}
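
/* Sketch of how this brk() grows VM's own heap (illustrative only; in
 * practice VM's own allocator drives brk()/_brksize for it); not compiled:
 */
#if 0
	extern char *_brksize;
	char *newbrk = _brksize + 2 * VM_PAGE_SIZE;

	if (brk(newbrk) == 0) {
		/* two more cacheable heap pages are now mapped, and the
		 * TLB has been flushed */
	}
#endif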
/*===========================================================================*
 *				do_getrusage				     *
 *===========================================================================*/
int do_getrusage(message *m)
{
	int res, slot;
	struct vmproc *vmp;
	struct rusage r_usage;

	if ((res = vm_isokendpt(m->m_source, &slot)) != OK)
		return ESRCH;

	vmp = &vmproc[slot];

	if ((res = sys_datacopy(m->m_source, m->m_lc_vm_rusage.addr,
		SELF, (vir_bytes) &r_usage, (vir_bytes) sizeof(r_usage))) < 0)
		return res;

	r_usage.ru_maxrss = vmp->vm_total_max;
	r_usage.ru_minflt = vmp->vm_minor_page_fault;
	r_usage.ru_majflt = vmp->vm_major_page_fault;

	return sys_datacopy(SELF, (vir_bytes) &r_usage, m->m_source,
		m->m_lc_vm_rusage.addr, (vir_bytes) sizeof(r_usage));
}
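
/* Caller-side sketch of the VM leg of getrusage() (hypothetical framing;
 * the caller's other rusage fields are copied in first and so preserved);
 * not compiled:
 */
#if 0
	message m;
	struct rusage ru;

	memset(&m, 0, sizeof(m));
	m.m_lc_vm_rusage.addr = (vir_bytes) &ru;
	/* send to VM_PROC_NR; on return, VM has filled ru.ru_maxrss,
	 * ru.ru_minflt and ru.ru_majflt */
#endif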