/* This file contains some utility routines for VM. */

#define _SYSTEM		1

#define brk _brk	/* get rid of no previous prototype warning */

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/bitmap.h>
#include <string.h>
#include <errno.h>
#include <env.h>
#include <unistd.h>
#include <assert.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/resource.h>

#include "proto.h"
#include "glo.h"
#include "util.h"
#include "region.h"
#include "sanitycheck.h"

#include <machine/archtypes.h>
#include "kernel/const.h"
#include "kernel/config.h"
#include "kernel/type.h"
#include "kernel/proc.h"
/*===========================================================================*
 *				get_mem_chunks				     *
 *===========================================================================*/
void get_mem_chunks(
	struct memory *mem_chunks)	/* store mem chunks here */
{
/* Initialize the free memory list from the kernel-provided memory map.
 * Translate the byte offsets and sizes in this list to clicks, properly
 * truncated.
 */
	phys_bytes base, size, limit;
	int i;
	struct memory *memp;

	/* Initialize everything to zero. */
	memset(mem_chunks, 0, NR_MEMS*sizeof(*mem_chunks));

	/* Obtain and parse memory from kernel environment. */
	/* XXX Any memory chunk in excess of NR_MEMS is silently ignored. */
	for (i = 0; i < MIN(MAXMEMMAP, NR_MEMS); i++) {
		mem_chunks[i].base = kernel_boot_info.memmap[i].mm_base_addr;
		mem_chunks[i].size = kernel_boot_info.memmap[i].mm_length;
	}

	/* Round physical memory to clicks. Round start up, round end down. */
	for (i = 0; i < NR_MEMS; i++) {
		memp = &mem_chunks[i];	/* next mem chunk is stored here */
		base = mem_chunks[i].base;
		size = mem_chunks[i].size;
		limit = base + size;
		base = (phys_bytes) (CLICK_CEIL(base));
		limit = (phys_bytes) (CLICK_FLOOR(limit));
		if (limit <= base) {
			memp->base = memp->size = 0;
		} else {
			memp->base = base >> CLICK_SHIFT;
			memp->size = (limit - base) >> CLICK_SHIFT;
		}
	}
}
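
/* Worked example (assuming a 4 KiB click size, i.e. CLICK_SHIFT == 12):
 * a chunk with base 0x12345 and size 0x3000 has limit 0x15345. CLICK_CEIL
 * rounds the base up to 0x13000 and CLICK_FLOOR rounds the limit down to
 * 0x15000, so the chunk becomes base click 0x13 with a size of 2 clicks;
 * the partial pages at either end are discarded rather than handed out as
 * free memory.
 */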
/*===========================================================================*
 *				vm_isokendpt				     *
 *===========================================================================*/
int vm_isokendpt(endpoint_t endpoint, int *procn)
{
	*procn = _ENDPOINT_P(endpoint);
	if (*procn < 0 || *procn >= NR_PROCS)
		return EINVAL;
	if (endpoint != vmproc[*procn].vm_endpoint)
		return EDEADEPT;
	if (!(vmproc[*procn].vm_flags & VMF_INUSE))
		return EDEADEPT;
	return OK;
}
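
/* Usage sketch (illustrative): an endpoint encodes a generation number in
 * addition to a process slot, so a stale endpoint left over from an exited
 * process may still map to a valid slot while no longer matching that
 * slot's vm_endpoint; the checks above catch exactly that case. Callers in
 * this file translate and bail out in one step:
 *
 *	int slot;
 *	if (vm_isokendpt(m->m_source, &slot) != OK)
 *		return EINVAL;
 *	vmp = &vmproc[slot];
 */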
/*===========================================================================*
 *				do_info					     *
 *===========================================================================*/
int do_info(message *m)
{
	struct vm_stats_info vsi;
	struct vm_usage_info vui;
	static struct vm_region_info vri[MAX_VRI_COUNT];
	struct vmproc *vmp;
	vir_bytes addr, size, next, ptr;
	int r, pr, dummy, count, free_pages, largest_contig;

	if (vm_isokendpt(m->m_source, &pr) != OK)
		return EINVAL;
	vmp = &vmproc[pr];

	ptr = (vir_bytes) m->m_lsys_vm_info.ptr;

	switch (m->m_lsys_vm_info.what) {
	case VMIW_STATS:
		vsi.vsi_pagesize = VM_PAGE_SIZE;
		vsi.vsi_total = total_pages;
		memstats(&dummy, &free_pages, &largest_contig);
		vsi.vsi_free = free_pages;
		vsi.vsi_largest = largest_contig;

		get_stats_info(&vsi);

		addr = (vir_bytes) &vsi;
		size = sizeof(vsi);

		break;

	case VMIW_USAGE:
		if (m->m_lsys_vm_info.ep < 0)
			get_usage_info_kernel(&vui);
		else if (vm_isokendpt(m->m_lsys_vm_info.ep, &pr) != OK)
			return EINVAL;
		else get_usage_info(&vmproc[pr], &vui);

		addr = (vir_bytes) &vui;
		size = sizeof(vui);

		break;

	case VMIW_REGION:
		if (vm_isokendpt(m->m_lsys_vm_info.ep, &pr) != OK)
			return EINVAL;

		count = MIN(m->m_lsys_vm_info.count, MAX_VRI_COUNT);
		next = m->m_lsys_vm_info.next;

		count = get_region_info(&vmproc[pr], vri, count, &next);

		m->m_lsys_vm_info.count = count;
		m->m_lsys_vm_info.next = next;

		addr = (vir_bytes) vri;
		size = sizeof(vri[0]) * count;

		break;

	default:
		return EINVAL;
	}

	if (size == 0)
		return OK;

	/* Make sure that no page faults can occur while copying out. A page
	 * fault would cause the kernel to send a notify to us, while we would
	 * be waiting for the result of the copy system call, resulting in a
	 * deadlock. Note that no memory mapping can be undone without the
	 * involvement of VM, so we are safe until we're done.
	 */
	r = handle_memory_once(vmp, ptr, size, 1 /*wrflag*/);
	if (r != OK) return r;

	/* Now that we know the copy out will succeed, perform the actual copy
	 * operation.
	 */
	return sys_datacopy(SELF, addr,
		(vir_bytes) vmp->vm_endpoint, ptr, size);
}
/*===========================================================================*
 *				swap_proc_slot				     *
 *===========================================================================*/
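/* Exchange the VM process entries of two processes, judging by the LU_DEBUG
 * tracing most likely the old and new instance of a service being live
 * updated, while leaving each entry's own endpoint and slot number in place.
 */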
int swap_proc_slot(struct vmproc *src_vmp, struct vmproc *dst_vmp)
{
	struct vmproc orig_src_vmproc, orig_dst_vmproc;

#if LU_DEBUG
	printf("VM: swap_proc: swapping %d (%d) and %d (%d)\n",
		src_vmp->vm_endpoint, src_vmp->vm_slot,
		dst_vmp->vm_endpoint, dst_vmp->vm_slot);
#endif

	/* Save existing data. */
	orig_src_vmproc = *src_vmp;
	orig_dst_vmproc = *dst_vmp;

	/* Swap slots. */
	*src_vmp = orig_dst_vmproc;
	*dst_vmp = orig_src_vmproc;

	/* Preserve endpoints and slot numbers. */
	src_vmp->vm_endpoint = orig_src_vmproc.vm_endpoint;
	src_vmp->vm_slot = orig_src_vmproc.vm_slot;
	dst_vmp->vm_endpoint = orig_dst_vmproc.vm_endpoint;
	dst_vmp->vm_slot = orig_dst_vmproc.vm_slot;

#if LU_DEBUG
	printf("VM: swap_proc: swapped %d (%d) and %d (%d)\n",
		src_vmp->vm_endpoint, src_vmp->vm_slot,
		dst_vmp->vm_endpoint, dst_vmp->vm_slot);
#endif

	return OK;
}
/*===========================================================================*
 *				swap_proc_dyn_data			     *
 *===========================================================================*/
int swap_proc_dyn_data(struct vmproc *src_vmp, struct vmproc *dst_vmp)
{
	int is_vm;
	int r;

	is_vm = (dst_vmp->vm_endpoint == VM_PROC_NR);

	/* For VM, transfer memory regions above the stack first. */
	if (is_vm) {
#if LU_DEBUG
		printf("VM: swap_proc_dyn_data: transferring regions above the stack from old VM (%d) to new VM (%d)\n",
			src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
#endif
		r = pt_map_in_range(src_vmp, dst_vmp, VM_STACKTOP, 0);
		if (r != OK) {
			printf("swap_proc_dyn_data: pt_map_in_range failed\n");
			return r;
		}
	}

#if LU_DEBUG
	printf("VM: swap_proc_dyn_data: swapping regions' parents for %d (%d) and %d (%d)\n",
		src_vmp->vm_endpoint, src_vmp->vm_slot,
		dst_vmp->vm_endpoint, dst_vmp->vm_slot);
#endif

	/* Swap vir_regions' parents. */
	map_setparent(src_vmp);
	map_setparent(dst_vmp);

	/* For regular processes, transfer regions above the stack now.
	 * In case of rollback, we need to skip this step. To sandbox the
	 * new instance and prevent state corruption on rollback, we share all
	 * the regions between the two instances as COW.
	 */
	if (!is_vm) {
		struct vir_region *vr;
		vr = map_lookup(dst_vmp, VM_STACKTOP, NULL);
		if (vr && !map_lookup(src_vmp, VM_STACKTOP, NULL)) {
#if LU_DEBUG
			printf("VM: swap_proc_dyn_data: transferring regions above the stack from %d to %d\n",
				src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
#endif
			r = map_proc_copy_from(src_vmp, dst_vmp, vr);
			if (r != OK) {
				return r;
			}
		}
	}

	return OK;
}
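
/* VM's private mmap()/munmap(): VM cannot ask itself for memory, so
 * anonymous allocations (notably the slab allocator's) are satisfied
 * directly from vm_allocpages(). Only page-aligned allocations without an
 * address hint are supported (see the asserts); the protection, flags, fd
 * and offset arguments are ignored.
 */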
void *mmap(void *addr, size_t len, int f, int f2, int f3, off_t o)
{
	void *ret;
	phys_bytes p;

	assert(!addr);
	assert(!(len % VM_PAGE_SIZE));

	ret = vm_allocpages(&p, VMP_SLAB, len/VM_PAGE_SIZE);

	if (!ret) return MAP_FAILED;
	memset(ret, 0, len);
	return ret;
}

int munmap(void *addr, size_t len)
{
	vm_freepages((vir_bytes) addr, roundup(len, VM_PAGE_SIZE)/VM_PAGE_SIZE);
	return 0;
}
int brk(void *addr)
{
	/* brk is a special case function to allow VM itself to
	 * allocate memory in its own (cacheable) heap.
	 */
	vir_bytes target = roundup((vir_bytes)addr, VM_PAGE_SIZE), v;
	extern char _end;
	extern char *_brksize;
	static vir_bytes prevbrk = (vir_bytes) &_end;
	struct vmproc *vmprocess = &vmproc[VM_PROC_NR];

	for (v = roundup(prevbrk, VM_PAGE_SIZE); v < target;
		v += VM_PAGE_SIZE) {
		phys_bytes mem, newpage = alloc_mem(1, 0);
		if (newpage == NO_MEM) return -1;
		mem = CLICK2ABS(newpage);
		if (pt_writemap(vmprocess, &vmprocess->vm_pt,
			v, mem, VM_PAGE_SIZE,
			ARCH_VM_PTE_PRESENT
			| ARCH_VM_PTE_USER
			| ARCH_VM_PTE_RW
#if defined(__arm__)
			| ARM_VM_PTE_CACHED
#endif
			, 0) != OK) {
			free_mem(newpage, 1);
			return -1;
		}
		prevbrk = v + VM_PAGE_SIZE;
	}

	_brksize = (char *) addr;

	if (sys_vmctl(SELF, VMCTL_FLUSHTLB, 0) != OK)
		panic("flushtlb failed");

	return 0;
}
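
/* Note: because of the "#define brk _brk" near the top of this file, the
 * function above actually defines _brk, which is presumably the symbol the
 * C library's sbrk()/malloc() path calls, so heap allocations made by VM
 * itself end up being mapped in page by page through the loop above.
 */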
/*===========================================================================*
 *				do_getrusage				     *
 *===========================================================================*/
int do_getrusage(message *m)
{
	int res, slot;
	struct vmproc *vmp;
	struct rusage r_usage;

	if (vm_isokendpt(m->m_source, &slot) != OK)
		return ESRCH;

	vmp = &vmproc[slot];
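
	/* Copy in the caller's struct rusage first: VM maintains only the
	 * fields filled in below (maximum RSS and the page fault counters),
	 * so any fields presumably already filled in by the caller or by
	 * another server must be preserved rather than zeroed.
	 */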
	if ((res = sys_datacopy(m->m_source, m->m_lc_vm_rusage.addr,
		SELF, (vir_bytes) &r_usage, (vir_bytes) sizeof(r_usage))) < 0)
		return res;

	r_usage.ru_maxrss = vmp->vm_total_max;
	r_usage.ru_minflt = vmp->vm_minor_page_fault;
	r_usage.ru_majflt = vmp->vm_major_page_fault;

	return sys_datacopy(SELF, (vir_bytes) &r_usage, m->m_source,
		m->m_lc_vm_rusage.addr, (vir_bytes) sizeof(r_usage));
}