/* servers/vm/main.c - main loop and initialization of the MINIX VM server. */
#define _POSIX_SOURCE 1
#define _MINIX 1
#define _SYSTEM 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/keymap.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/const.h>
#include <minix/bitmap.h>
#include <minix/crtso.h>
#include <minix/rs.h>

#include <libexec.h>
#include <ctype.h>
#include <errno.h>
#include <string.h>
#include <env.h>
#include <stdio.h>
#include <assert.h>

#include <memory.h>

#define _MAIN 1
#include "glo.h"
#include "proto.h"
#include "util.h"
#include "vm.h"
#include "sanitycheck.h"

extern int missing_spares;

#include <machine/archtypes.h>
#include <sys/param.h>
#include "kernel/const.h"
#include "kernel/config.h"
#include "kernel/proc.h"

#include <signal.h>

/* Table of calls and a macro to test for being in range. */
struct {
    int (*vmc_func)(message *); /* Call handles message. */
    char *vmc_name;             /* Human-readable string. */
} vm_calls[NR_VM_CALLS];

/* Macro to verify the call range and map the 'high' range to the 'base'
 * range (starting at 0) in one step. Evaluates to the zero-based call
 * number if the call number is valid, and to -1 otherwise.
 */
#define CALLNUMBER(c) (((c) >= VM_RQ_BASE && \
    (c) < VM_RQ_BASE + ELEMENTS(vm_calls)) ? \
    ((c) - VM_RQ_BASE) : -1)
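/* For example, CALLNUMBER(VM_MMAP) evaluates to VM_MMAP - VM_RQ_BASE, the
 * index at which CALLMAP() installs do_mmap below; any message type outside
 * the VM_RQ_BASE window maps to -1 and is answered with ENOSYS in main().
 */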

static int map_service(struct rprocpub *rpub);
static int vm_acl_ok(endpoint_t caller, int call);
static int do_rs_init(message *m);

/* SEF functions and variables. */
static void sef_cb_signal_handler(int signo);

void init_vm(void);

/*===========================================================================*
 *                                  main                                     *
 *===========================================================================*/
int main(void)
{
    message msg;
    int result, who_e, rcv_sts;
    int caller_slot;

    /* Initialize the system so that all processes are runnable. */
    init_vm();

    /* Register init callbacks. */
    sef_setcb_init_restart(sef_cb_init_fail);
    sef_setcb_signal_handler(sef_cb_signal_handler);

    /* Let SEF perform startup. */
    sef_startup();

    SANITYCHECK(SCL_TOP);

    /* This is VM's main loop. */
    while (TRUE) {
        int r, c;

        SANITYCHECK(SCL_TOP);
        if (missing_spares > 0) {
            pt_cycle(); /* pagetable code wants to be called */
        }

        if ((r = sef_receive_status(ANY, &msg, &rcv_sts)) != OK)
            panic("sef_receive_status() error: %d", r);

        if (is_ipc_notify(rcv_sts)) {
            /* Unexpected notify(). */
            printf("VM: ignoring notify() from %d\n", msg.m_source);
            continue;
        }
        who_e = msg.m_source;
        if (vm_isokendpt(who_e, &caller_slot) != OK)
            panic("invalid caller %d", who_e);
        c = CALLNUMBER(msg.m_type);
        result = ENOSYS; /* Out-of-range or restricted calls return this. */

        if (msg.m_type == RS_INIT && msg.m_source == RS_PROC_NR) {
            result = do_rs_init(&msg);
        } else if (msg.m_type == VM_PAGEFAULT) {
            if (!IPC_STATUS_FLAGS_TEST(rcv_sts, IPC_FLG_MSG_FROM_KERNEL)) {
                printf("VM: process %d faked VM_PAGEFAULT "
                    "message!\n", msg.m_source);
            }
            do_pagefaults(&msg);
            pt_clearmapcache();
            /* Do not reply to this call. On success the caller is
             * unblocked by a sys_vmctl() call in do_pagefaults();
             * otherwise VM panics.
             */
            continue;
        } else if (c < 0 || !vm_calls[c].vmc_func) {
            /* Out of range or missing call number. */
        } else {
            if (vm_acl_ok(who_e, c) != OK) {
                printf("VM: unauthorized %s by %d\n",
                    vm_calls[c].vmc_name, who_e);
            } else {
                SANITYCHECK(SCL_FUNCTIONS);
                result = vm_calls[c].vmc_func(&msg);
                SANITYCHECK(SCL_FUNCTIONS);
            }
        }

        /* Send a reply message, unless the return code is SUSPEND,
         * which is a pseudo-result suppressing the reply message.
         */
        if (result != SUSPEND) {
            msg.m_type = result;
            if ((r = send(who_e, &msg)) != OK) {
                printf("VM: couldn't send %d to %d (err %d)\n",
                    msg.m_type, who_e, r);
                panic("send() error");
            }
        }
    }
    return(OK);
}
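
/* Handle the RS_INIT message from RS: copy in the public process table of
 * the boot image, install the VM call mask for every service in use, and
 * acknowledge RS with sendrec(). Returns SUSPEND so that main() does not
 * send a second reply.
 */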
static int do_rs_init(message *m)
{
    int s, i;
    static struct rprocpub rprocpub[NR_BOOT_PROCS];

    /* Map all the services in the boot image. */
    if ((s = sys_safecopyfrom(RS_PROC_NR, m->RS_INIT_RPROCTAB_GID, 0,
        (vir_bytes) rprocpub, sizeof(rprocpub))) != OK) {
        panic("vm: sys_safecopyfrom (rs) failed: %d", s);
    }

    for (i = 0; i < NR_BOOT_PROCS; i++) {
        if (rprocpub[i].in_use) {
            if ((s = map_service(&rprocpub[i])) != OK) {
                panic("unable to map service: %d", s);
            }
        }
    }

    /* RS expects this response, which it in turn replies to: */
    m->RS_INIT_RESULT = OK;
    sendrec(RS_PROC_NR, m);

    return(SUSPEND);
}
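
/* Find the boot-image entry whose process number matches ep_nr, claim its
 * vmproc slot (mark it VMF_INUSE), and record its endpoint and boot_image
 * pointer. Panics if the process is not part of the boot image.
 */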
struct vmproc *init_proc(endpoint_t ep_nr)
{
    static struct boot_image *ip;

    for (ip = &kernel_boot_info.boot_procs[0];
        ip < &kernel_boot_info.boot_procs[NR_BOOT_PROCS]; ip++) {
        struct vmproc *vmp;

        if (ip->proc_nr != ep_nr) continue;

        if (ip->proc_nr >= _NR_PROCS || ip->proc_nr < 0)
            panic("proc: %d", ip->proc_nr);

        vmp = &vmproc[ip->proc_nr];
        assert(!(vmp->vm_flags & VMF_INUSE)); /* no double procs */
        clear_proc(vmp);
        vmp->vm_flags = VMF_INUSE;
        vmp->vm_endpoint = ip->endpoint;
        vmp->vm_boot = ip;

        return vmp;
    }

    panic("no init_proc");
}
struct vm_exec_info {
    struct exec_info execi;
    struct boot_image *ip;
    struct vmproc *vmp;
};
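
/* libexec 'copymem' callback: copy 'len' bytes at offset 'off' of the boot
 * image module straight into the new process with sys_physcopy().
 */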
static int libexec_copy_physcopy(struct exec_info *execi,
    off_t off, off_t vaddr, size_t len)
{
    vir_bytes end;
    struct vm_exec_info *ei = execi->opaque;

    end = ei->ip->start_addr + ei->ip->len;
    assert(ei->ip->start_addr + off + len <= end);
    return sys_physcopy(NONE, ei->ip->start_addr + off,
        execi->proc_e, vaddr, len);
}
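
/* Map an anonymous, writable region of 'len' bytes at 'vaddr' into the boot
 * process; 'flags' selects between preallocated and on-demand memory.
 */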
static void boot_alloc(struct exec_info *execi, off_t vaddr,
    size_t len, int flags)
{
    struct vmproc *vmp = ((struct vm_exec_info *) execi->opaque)->vmp;

    if (!(map_page_region(vmp, vaddr, 0, len, MAP_NONE,
        VR_ANON | VR_WRITABLE | VR_UNINITIALIZED, flags))) {
        panic("VM: exec: map_page_region for boot process failed");
    }
}
static int libexec_alloc_vm_prealloc(struct exec_info *execi,
    off_t vaddr, size_t len)
{
    boot_alloc(execi, vaddr, len, MF_PREALLOC);
    return OK;
}

static int libexec_alloc_vm_ondemand(struct exec_info *execi,
    off_t vaddr, size_t len)
{
    boot_alloc(execi, vaddr, len, 0);
    return OK;
}
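
/* Load a boot-image process: give it a fresh page table, read its ELF
 * header from the boot image, load the image through libexec using the
 * callbacks above, and start it with sys_exec().
 */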
void exec_bootproc(struct vmproc *vmp, struct boot_image *ip)
{
    struct vm_exec_info vmexeci;
    struct exec_info *execi = &vmexeci.execi;
    char hdr[VM_PAGE_SIZE];

    memset(&vmexeci, 0, sizeof(vmexeci));

    if (pt_new(&vmp->vm_pt) != OK)
        panic("VM: no new pagetable");

    if (pt_bind(&vmp->vm_pt, vmp) != OK)
        panic("VM: pt_bind failed");

    if (sys_physcopy(NONE, ip->start_addr, SELF,
        (vir_bytes) hdr, sizeof(hdr)) != OK)
        panic("can't look at boot proc header");

    execi->stack_high = kernel_boot_info.user_sp;
    execi->stack_size = DEFAULT_STACK_LIMIT;
    execi->proc_e = vmp->vm_endpoint;
    execi->hdr = hdr;
    execi->hdr_len = sizeof(hdr);
    strlcpy(execi->progname, ip->proc_name, sizeof(execi->progname));
    execi->frame_len = 0;
    execi->opaque = &vmexeci;

    vmexeci.ip = ip;
    vmexeci.vmp = vmp;

    /* Callback functions and data. */
    execi->copymem = libexec_copy_physcopy;
    execi->clearproc = NULL;
    execi->clearmem = libexec_clear_sys_memset;
    execi->allocmem_prealloc = libexec_alloc_vm_prealloc;
    execi->allocmem_ondemand = libexec_alloc_vm_ondemand;

    if (libexec_load_elf(execi) != OK)
        panic("vm: boot process load of %d failed\n", vmp->vm_endpoint);

    if (sys_exec(vmp->vm_endpoint, (char *) execi->stack_high - 12,
        (char *) ip->proc_name, execi->pc) != OK)
        panic("vm: boot process exec of %d failed\n", vmp->vm_endpoint);
}
void init_vm(void)
{
    int s, i;
    static struct memory mem_chunks[NR_MEMS];
    static struct boot_image *ip;

#if SANITYCHECKS
    incheck = nocheck = 0;
#endif

    /* Retrieve various crucial boot parameters. */
    if (OK != (s = sys_getkinfo(&kernel_boot_info))) {
        panic("couldn't get bootinfo: %d", s);
    }

    /* Sanity check. */
    assert(kernel_boot_info.mmap_size > 0);
    assert(kernel_boot_info.mods_with_kernel > 0);

#if SANITYCHECKS
    env_parse("vm_sanitychecklevel", "d", 0, &vm_sanitychecklevel, 0, SCL_MAX);
#endif

    /* Get chunks of available memory. */
    get_mem_chunks(mem_chunks);

    /* Set the table to 0. This invalidates all slots (clears VMF_INUSE). */
    memset(vmproc, 0, sizeof(vmproc));

    for (i = 0; i < ELEMENTS(vmproc); i++) {
        vmproc[i].vm_slot = i;
    }

    /* Region management initialization. */
    map_region_init();

    /* Initialize tables to all physical memory. */
    mem_init(mem_chunks);

    /* Architecture-dependent initialization. */
    init_proc(VM_PROC_NR);
    pt_init();

    /* Give these processes their own page table. */
    for (ip = &kernel_boot_info.boot_procs[0];
        ip < &kernel_boot_info.boot_procs[NR_BOOT_PROCS]; ip++) {
        struct vmproc *vmp;

        if (ip->proc_nr < 0) continue;

        assert(ip->start_addr);

        /* VM has already been set up by the kernel and pt_init().
         * Any other boot process is already in memory and is set up
         * here.
         */
        if (ip->proc_nr == VM_PROC_NR) continue;

        vmp = init_proc(ip->proc_nr);

        exec_bootproc(vmp, ip);

        /* Free the file blob. */
        assert(!(ip->start_addr % VM_PAGE_SIZE));
        ip->len = roundup(ip->len, VM_PAGE_SIZE);
        free_mem(ABS2CLICK(ip->start_addr), ABS2CLICK(ip->len));
    }

    /* Set up the table of calls. */
#define CALLMAP(code, func) { int i; \
    i = CALLNUMBER(code); \
    assert(i >= 0); \
    assert(i < NR_VM_CALLS); \
    vm_calls[i].vmc_func = (func); \
    vm_calls[i].vmc_name = #code; \
}

    /* Set the call table to 0. This invalidates all calls (clears
     * vmc_func).
     */
    memset(vm_calls, 0, sizeof(vm_calls));

    /* Basic VM calls. */
    CALLMAP(VM_MMAP, do_mmap);
    CALLMAP(VM_MUNMAP, do_munmap);
    CALLMAP(VM_MAP_PHYS, do_map_phys);
    CALLMAP(VM_UNMAP_PHYS, do_unmap_phys);

    /* Calls from PM. */
    CALLMAP(VM_EXIT, do_exit);
    CALLMAP(VM_FORK, do_fork);
    CALLMAP(VM_BRK, do_brk);
    CALLMAP(VM_WILLEXIT, do_willexit);
    CALLMAP(VM_NOTIFY_SIG, do_notify_sig);

    /* Calls from RS. */
    CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv);
    CALLMAP(VM_RS_UPDATE, do_rs_update);
    CALLMAP(VM_RS_MEMCTL, do_rs_memctl);

    /* Calls from RS/VFS. */
    CALLMAP(VM_PROCCTL, do_procctl);

    /* Generic calls. */
    CALLMAP(VM_REMAP, do_remap);
    CALLMAP(VM_REMAP_RO, do_remap);
    CALLMAP(VM_GETPHYS, do_get_phys);
    CALLMAP(VM_SHM_UNMAP, do_shared_unmap);
    CALLMAP(VM_GETREF, do_get_refcount);
    CALLMAP(VM_INFO, do_info);
    CALLMAP(VM_QUERY_EXIT, do_query_exit);
    CALLMAP(VM_WATCH_EXIT, do_watch_exit);
    CALLMAP(VM_FORGETBLOCKS, do_forgetblocks);
    CALLMAP(VM_FORGETBLOCK, do_forgetblock);
    CALLMAP(VM_YIELDBLOCKGETBLOCK, do_yieldblockgetblock);

    /* Initialize the structures for queryexit. */
    init_query_exit();
}

/*===========================================================================*
 *                          sef_cb_signal_handler                            *
 *===========================================================================*/
static void sef_cb_signal_handler(int signo)
{
    /* Check for known kernel signals, ignore anything else. */
    switch (signo) {
        /* There is a pending memory request from the kernel. */
        case SIGKMEM:
            do_memory();
            break;
    }

    /* We can get stuck receiving signals without sef_receive() returning,
     * yet we might still need more memory.
     */
    if (missing_spares > 0) {
        pt_cycle(); /* pagetable code wants to be called */
    }

    pt_clearmapcache();
}

/*===========================================================================*
 *                               map_service                                 *
 *===========================================================================*/
static int map_service(struct rprocpub *rpub)
{
    /* Map a new service by initializing its call mask. */
    int r, proc_nr;

    if ((r = vm_isokendpt(rpub->endpoint, &proc_nr)) != OK) {
        return r;
    }

    /* Copy the call mask. */
    memcpy(&vmproc[proc_nr].vm_call_mask, &rpub->vm_call_mask,
        sizeof(vmproc[proc_nr].vm_call_mask));

    return(OK);
}

/*===========================================================================*
 *                                vm_acl_ok                                  *
 *===========================================================================*/
static int vm_acl_ok(endpoint_t caller, int call)
{
    int n, r;

    if ((r = vm_isokendpt(caller, &n)) != OK)
        panic("VM: from strange source: %d", caller);

    /* See if the call is allowed. */
    if (!GET_BIT(vmproc[n].vm_call_mask, call)) {
        return EPERM;
    }

    return OK;
}