/* minix/servers/vm/main.c */
#define _SYSTEM 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/bitmap.h>
#include <minix/rs.h>
#include <minix/vfsif.h>

#include <sys/exec.h>

#include <libexec.h>
#include <ctype.h>
#include <errno.h>
#include <string.h>
#include <env.h>
#include <stdio.h>
#include <assert.h>

#define _MAIN 1
#include "glo.h"
#include "proto.h"
#include "util.h"
#include "vm.h"
#include "sanitycheck.h"
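
/* Maintained by the memory allocation code: when positive, the pool of
 * spare pages is low and the main loop (and the signal handler) call
 * alloc_cycle() to replenish it.
 */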
extern int missing_spares;

#include <machine/archtypes.h>
#include <sys/param.h>
#include "kernel/const.h"
#include "kernel/config.h"
#include "kernel/proc.h"

#include <signal.h>
#include <lib.h>

/* Table of calls and a macro to test for being in range. */
struct {
    int (*vmc_func)(message *);    /* Call handles message. */
    const char *vmc_name;          /* Human-readable string. */
} vm_calls[NR_VM_CALLS];

/* Macro to verify call range and map 'high' range to 'base' range
 * (starting at 0) in one. Evaluates to zero-based call number if call
 * number is valid, returns -1 otherwise.
 */
#define CALLNUMBER(c) (((c) >= VM_RQ_BASE && \
    (c) < VM_RQ_BASE + ELEMENTS(vm_calls)) ? \
    ((c) - VM_RQ_BASE) : -1)
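
/* Example: a request with m_type VM_RQ_BASE + 2 maps to slot 2 of
 * vm_calls[]; any m_type outside [VM_RQ_BASE, VM_RQ_BASE + NR_VM_CALLS)
 * evaluates to -1 and is answered with ENOSYS in the main loop.
 */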

static int map_service(struct rprocpub *rpub);

static struct rprocpub rprocpub[NR_SYS_PROCS];
int __vm_init_fresh;

/* SEF functions and variables. */
static void sef_local_startup(void);
static int sef_cb_init_lu_restart(int type, sef_init_info_t *info);
static int sef_cb_init_fresh(int type, sef_init_info_t *info);
static void sef_cb_signal_handler(int signo);

void init_vm(void);

int do_sef_init_request(message *);

/*===========================================================================*
 *				is_first_time				     *
 *===========================================================================*/
static int is_first_time(void)
{
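    /* RS still has RTS_BOOTINHIBIT set only during the initial boot (VM
     * itself clears that flag in exec_bootproc() below), so the flag
     * distinguishes a cold start from a VM restart or live update.
     */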
    struct proc rs_proc;
    int r;

    if ((r = sys_getproc(&rs_proc, RS_PROC_NR)) != OK)
        panic("VM: couldn't get RS process data: %d", r);

    return RTS_ISSET(&rs_proc, RTS_BOOTINHIBIT);
}

/*===========================================================================*
 *				main					     *
 *===========================================================================*/
int main(void)
{
    message msg;
    int result, who_e, rcv_sts;
    int caller_slot;

    /* Initialize system so that all processes are runnable the first time. */
    if (is_first_time()) {
        init_vm();
        __vm_init_fresh = 1;
    }

    /* SEF local startup. */
    sef_local_startup();
    __vm_init_fresh = 0;

    SANITYCHECK(SCL_TOP);

    /* This is VM's main loop. */
    while (TRUE) {
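        /* Each iteration receives one request, dispatches it through
         * vm_calls[] or one of the special cases below, and replies
         * unless the result is SUSPEND.
         */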
        int r, c;
        int type;
        int transid = 0;    /* VFS transid if any */

        SANITYCHECK(SCL_TOP);
        if (missing_spares > 0) {
            alloc_cycle();    /* mem alloc code wants to be called */
        }

        if ((r = sef_receive_status(ANY, &msg, &rcv_sts)) != OK)
            panic("sef_receive_status() error: %d", r);

        if (is_ipc_notify(rcv_sts)) {
            /* Unexpected ipc_notify(). */
            printf("VM: ignoring ipc_notify() from %d\n", msg.m_source);
            continue;
        }

        who_e = msg.m_source;
        if (vm_isokendpt(who_e, &caller_slot) != OK)
            panic("invalid caller %d", who_e);

        /* We depend on this being false for the initialized value. */
        assert(!IS_VFS_FS_TRANSID(transid));

        type = msg.m_type;
        c = CALLNUMBER(type);
        result = ENOSYS;    /* Out of range or restricted calls return this. */

        transid = TRNS_GET_ID(msg.m_type);

        if ((msg.m_source == VFS_PROC_NR) && IS_VFS_FS_TRANSID(transid)) {
            /* If it's a request from VFS, it might have a transaction id. */
            msg.m_type = TRNS_DEL_ID(msg.m_type);

            /* Calls that use the transid */
            result = do_procctl(&msg, transid);
        } else if (msg.m_type == RS_INIT && msg.m_source == RS_PROC_NR) {
            result = do_sef_init_request(&msg);
            if (result != OK) panic("do_sef_init_request failed!\n");
            result = SUSPEND;    /* do not reply to RS */
        } else if (msg.m_type == VM_PAGEFAULT) {
            if (!IPC_STATUS_FLAGS_TEST(rcv_sts, IPC_FLG_MSG_FROM_KERNEL)) {
                printf("VM: process %d faked VM_PAGEFAULT "
                    "message!\n", msg.m_source);
            }
            do_pagefaults(&msg);
            /* Do not reply to this call; the caller is unblocked by a
             * sys_vmctl() call in do_pagefaults() on success. VM panics
             * otherwise.
             */
            continue;
        } else if (c < 0 || !vm_calls[c].vmc_func) {
            /* out of range or missing callnr */
        } else {
            if (acl_check(&vmproc[caller_slot], c) != OK) {
                printf("VM: unauthorized %s by %d\n",
                    vm_calls[c].vmc_name, who_e);
            } else {
                SANITYCHECK(SCL_FUNCTIONS);
                result = vm_calls[c].vmc_func(&msg);
                SANITYCHECK(SCL_FUNCTIONS);
            }
        }

        /* Send a reply message, unless the return code is SUSPEND,
         * which is a pseudo-result suppressing the reply message.
         */
        if (result != SUSPEND) {
            msg.m_type = result;

            assert(!IS_VFS_FS_TRANSID(transid));

            if ((r = ipc_send(who_e, &msg)) != OK) {
                printf("VM: couldn't send %d to %d (err %d)\n",
                    msg.m_type, who_e, r);
                panic("ipc_send() error");
            }
        }
    }
    return(OK);
}

static void sef_cb_lu_state_changed(int old_state, int state)
{
/* Called whenever the live-update state changes. We need to restore certain
 * state in the old VM instance after a live update has failed, because some
 * but not all memory is shared between the two VM instances.
 */
    struct vmproc *vmp;

    if (state == SEF_LU_STATE_NULL) {
        /* Undo some of the changes that may have been made by the new VM
         * instance. If the new VM instance is us, nothing happens.
         */
        vmp = &vmproc[VM_PROC_NR];

        /* Rebind page tables. */
        pt_bind(&vmp->vm_pt, vmp);
        pt_clearmapcache();

        /* Readjust process references. */
        adjust_proc_refs();
    }
}

static void sef_local_startup(void)
{
    /* Register init callbacks. */
    sef_setcb_init_fresh(sef_cb_init_fresh);
    sef_setcb_init_lu(sef_cb_init_lu_restart);
    sef_setcb_init_restart(sef_cb_init_lu_restart);

    /* In order to avoid a deadlock at boot time, send the first RS_INIT
     * reply to RS asynchronously. After that, use sendrec as usual.
     */
    if (__vm_init_fresh)
        sef_setcb_init_response(sef_cb_init_response_rs_asyn_once);

    /* Register live update callbacks. */
    sef_setcb_lu_state_changed(sef_cb_lu_state_changed);

    /* Register signal callbacks. */
    sef_setcb_signal_handler(sef_cb_signal_handler);

    /* Let SEF perform startup. */
    sef_startup();
}

static int sef_cb_init_fresh(int type, sef_init_info_t *info)
{
    int s, i;

    /* Map all the services in the boot image. */
    if ((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
        (vir_bytes) rprocpub, sizeof(rprocpub))) != OK) {
        panic("vm: sys_safecopyfrom (rs) failed: %d", s);
    }

    for (i = 0; i < NR_BOOT_PROCS; i++) {
        if (rprocpub[i].in_use) {
            if ((s = map_service(&rprocpub[i])) != OK) {
                panic("unable to map service: %d", s);
            }
        }
    }

    return(OK);
}

static struct vmproc *init_proc(endpoint_t ep_nr)
{
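    /* Find the boot image entry whose process number is ep_nr and claim
     * the matching vmproc slot; panics if no such boot process exists.
     */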
    struct boot_image *ip;

    for (ip = &kernel_boot_info.boot_procs[0];
        ip < &kernel_boot_info.boot_procs[NR_BOOT_PROCS]; ip++) {
        struct vmproc *vmp;

        if (ip->proc_nr != ep_nr) continue;

        if (ip->proc_nr >= _NR_PROCS || ip->proc_nr < 0)
            panic("proc: %d", ip->proc_nr);

        vmp = &vmproc[ip->proc_nr];
        assert(!(vmp->vm_flags & VMF_INUSE));    /* no double procs */
        clear_proc(vmp);
        vmp->vm_flags = VMF_INUSE;
        vmp->vm_endpoint = ip->endpoint;
        vmp->vm_boot = ip;

        return vmp;
    }

    panic("no init_proc");
}

struct vm_exec_info {
    struct exec_info execi;
    struct boot_image *ip;
    struct vmproc *vmp;
};

static int libexec_copy_physcopy(struct exec_info *execi,
    off_t off, vir_bytes vaddr, size_t len)
{
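    /* libexec callback: copy 'len' bytes from offset 'off' in the boot
     * image, which still resides in physical memory, into the new process
     * at virtual address 'vaddr'.
     */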
    vir_bytes end;
    struct vm_exec_info *ei = execi->opaque;
    end = ei->ip->start_addr + ei->ip->len;
    assert(ei->ip->start_addr + off + len <= end);
    return sys_physcopy(NONE, ei->ip->start_addr + off,
        execi->proc_e, vaddr, len, 0);
}

static void boot_alloc(struct exec_info *execi, off_t vaddr,
    size_t len, int flags)
{
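    /* Shared helper for the libexec allocation callbacks below: map an
     * anonymous, writable region into the boot process's address space.
     * MF_PREALLOC in 'flags' allocates the memory up front rather than
     * on first access.
     */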
    struct vmproc *vmp = ((struct vm_exec_info *) execi->opaque)->vmp;

    if (!(map_page_region(vmp, vaddr, 0, len,
        VR_ANON | VR_WRITABLE | VR_UNINITIALIZED, flags,
        &mem_type_anon))) {
        panic("VM: exec: map_page_region for boot process failed");
    }
}

static int libexec_alloc_vm_prealloc(struct exec_info *execi,
    vir_bytes vaddr, size_t len)
{
    boot_alloc(execi, vaddr, len, MF_PREALLOC);
    return OK;
}

static int libexec_alloc_vm_ondemand(struct exec_info *execi,
    vir_bytes vaddr, size_t len)
{
    boot_alloc(execi, vaddr, len, 0);
    return OK;
}

static void exec_bootproc(struct vmproc *vmp, struct boot_image *ip)
{
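    /* Bring a boot-image process to life: create and bind a fresh page
     * table, load the ELF image via libexec, build a minimal argv/envp
     * stack frame, start the process with sys_exec(), and clear its
     * boot-inhibit flag so that it becomes runnable.
     */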
    struct vm_exec_info vmexeci;
    struct exec_info *execi = &vmexeci.execi;
    /* libexec needs proper alignment for casting to structures. */
    char hdr[VM_PAGE_SIZE] __aligned(8);

    size_t frame_size = 0;    /* Size of the new initial stack. */
    int argc = 0;             /* Argument count. */
    int envc = 0;             /* Environment count. */
    char overflow = 0;        /* No overflow yet. */
    struct ps_strings *psp;

    int vsp = 0;    /* (virtual) Stack pointer in new address space. */
    char *argv[] = { ip->proc_name, NULL };
    char *envp[] = { NULL };
    char *path = ip->proc_name;
    char frame[VM_PAGE_SIZE] __aligned(sizeof(void *));

    memset(&vmexeci, 0, sizeof(vmexeci));

    if (pt_new(&vmp->vm_pt) != OK)
        panic("VM: no new pagetable");

    if (pt_bind(&vmp->vm_pt, vmp) != OK)
        panic("VM: pt_bind failed");

    if (sys_physcopy(NONE, ip->start_addr, SELF,
        (vir_bytes) hdr, sizeof(hdr), 0) != OK)
        panic("can't look at boot proc header");

    execi->stack_high = kernel_boot_info.user_sp;
    execi->stack_size = DEFAULT_STACK_LIMIT;
    execi->proc_e = vmp->vm_endpoint;
    execi->hdr = hdr;
    execi->hdr_len = sizeof(hdr);
    strlcpy(execi->progname, ip->proc_name, sizeof(execi->progname));
    execi->frame_len = 0;
    execi->opaque = &vmexeci;
    execi->filesize = ip->len;

    vmexeci.ip = ip;
    vmexeci.vmp = vmp;

    /* Callback functions and data. */
    execi->copymem = libexec_copy_physcopy;
    execi->clearproc = NULL;
    execi->clearmem = libexec_clear_sys_memset;
    execi->allocmem_prealloc_junk = libexec_alloc_vm_prealloc;
    execi->allocmem_prealloc_cleared = libexec_alloc_vm_prealloc;
    execi->allocmem_ondemand = libexec_alloc_vm_ondemand;

    if (libexec_load_elf(execi) != OK)
        panic("vm: boot process load of process %s (ep=%d) failed\n",
            execi->progname, vmp->vm_endpoint);

    /* Set up a minimal stack. */
    minix_stack_params(path, argv, envp, &frame_size, &overflow, &argc,
        &envc);

    /* The party is off if there is an overflow, or it is too big for our
     * pre-allocated space.
     */
    if (overflow || frame_size > sizeof(frame))
        panic("vm: could not alloc stack for boot process %s (ep=%d)\n",
            execi->progname, vmp->vm_endpoint);

    minix_stack_fill(path, argc, argv, envc, envp, frame_size, frame, &vsp,
        &psp);

    if (handle_memory_once(vmp, vsp, frame_size, 1) != OK)
        panic("vm: could not map stack for boot process %s (ep=%d)\n",
            execi->progname, vmp->vm_endpoint);

    if (sys_datacopy(SELF, (vir_bytes) frame, vmp->vm_endpoint, vsp,
        frame_size) != OK)
        panic("vm: could not copy stack for boot process %s (ep=%d)\n",
            execi->progname, vmp->vm_endpoint);

    if (sys_exec(vmp->vm_endpoint, (vir_bytes) vsp,
        (vir_bytes) execi->progname, execi->pc,
        vsp + ((int) psp - (int) frame)) != OK)
        panic("vm: boot process exec of process %s (ep=%d) failed\n",
            execi->progname, vmp->vm_endpoint);

    /* Make it runnable. */
    if (sys_vmctl(vmp->vm_endpoint, VMCTL_BOOTINHIBIT_CLEAR, 0) != OK)
        panic("VMCTL_BOOTINHIBIT_CLEAR failed");
}

static int do_procctl_notrans(message *msg)
{
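    /* Handle a VM_PROCCTL request that arrived without a VFS transaction
     * id; requests from VFS carry a transid and are routed directly to
     * do_procctl() by the main loop.
     */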
    int transid = 0;

    assert(!IS_VFS_FS_TRANSID(transid));

    return do_procctl(msg, transid);
}

void init_vm(void)
{
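    /* Fresh-boot initialization: fetch boot parameters and memory ranges
     * from the kernel, set up the vmproc table, allocator, region and
     * page-table code, exec the other boot processes, and build the
     * vm_calls dispatch table.
     */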
    int s, i;
    static struct memory mem_chunks[NR_MEMS];
    struct boot_image *ip;
    extern void __minix_init(void);
    multiboot_module_t *mod;
    vir_bytes kern_dyn, kern_static;

#if SANITYCHECKS
    incheck = nocheck = 0;
#endif

    /* Retrieve various crucial boot parameters. */
    if (OK != (s = sys_getkinfo(&kernel_boot_info))) {
        panic("couldn't get bootinfo: %d", s);
    }

    /* Turn file mmap on? */
    enable_filemap = 1;    /* yes by default */
    env_parse("filemap", "d", 0, &enable_filemap, 0, 1);

    /* Sanity check. */
    assert(kernel_boot_info.mmap_size > 0);
    assert(kernel_boot_info.mods_with_kernel > 0);

    /* Get chunks of available memory. */
    get_mem_chunks(mem_chunks);

    /* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
    memset(vmproc, 0, sizeof(vmproc));

    for (i = 0; i < ELEMENTS(vmproc); i++) {
        vmproc[i].vm_slot = i;
    }

    /* Initialize ACL data structures. */
    acl_init();

    /* Region management initialization. */
    map_region_init();

    /* Initialize tables to all physical memory. */
    mem_init(mem_chunks);

    /* Architecture-dependent initialization. */
    init_proc(VM_PROC_NR);
    pt_init();

    /* Acquire kernel ipc vectors that weren't available
     * before VM had determined kernel mappings.
     */
    __minix_init();

    /* The kernel's freelist does not include boot-time modules; let
     * the allocator know that the total memory is bigger.
     */
    for (mod = &kernel_boot_info.module_list[0];
        mod < &kernel_boot_info.module_list[kernel_boot_info.mods_with_kernel-1];
        mod++) {
        phys_bytes len = mod->mod_end - mod->mod_start + 1;
        len = roundup(len, VM_PAGE_SIZE);
        mem_add_total_pages(len / VM_PAGE_SIZE);
    }

    kern_dyn = kernel_boot_info.kernel_allocated_bytes_dynamic;
    kern_static = kernel_boot_info.kernel_allocated_bytes;
    kern_static = roundup(kern_static, VM_PAGE_SIZE);
    mem_add_total_pages((kern_dyn + kern_static) / VM_PAGE_SIZE);

    /* Give these processes their own page table. */
    for (ip = &kernel_boot_info.boot_procs[0];
        ip < &kernel_boot_info.boot_procs[NR_BOOT_PROCS]; ip++) {
        struct vmproc *vmp;

        if (ip->proc_nr < 0) continue;

        assert(ip->start_addr);

        /* VM has already been set up by the kernel and pt_init().
         * Any other boot process is already in memory and is set up
         * here.
         */
        if (ip->proc_nr == VM_PROC_NR) continue;

        vmp = init_proc(ip->proc_nr);

        exec_bootproc(vmp, ip);

        /* Free the file blob. */
        assert(!(ip->start_addr % VM_PAGE_SIZE));
        ip->len = roundup(ip->len, VM_PAGE_SIZE);
        free_mem(ABS2CLICK(ip->start_addr), ABS2CLICK(ip->len));
    }

    /* Set up table of calls. */
#define CALLMAP(code, func) { int _cmi; \
    _cmi = CALLNUMBER(code); \
    assert(_cmi >= 0); \
    assert(_cmi < NR_VM_CALLS); \
    vm_calls[_cmi].vmc_func = (func); \
    vm_calls[_cmi].vmc_name = #code; \
}

    /* Set call table to 0. This invalidates all calls (clear
     * vmc_func).
     */
    memset(vm_calls, 0, sizeof(vm_calls));

    /* Basic VM calls. */
    CALLMAP(VM_MMAP, do_mmap);
    CALLMAP(VM_MUNMAP, do_munmap);
    CALLMAP(VM_MAP_PHYS, do_map_phys);
    CALLMAP(VM_UNMAP_PHYS, do_munmap);

    /* Calls from PM. */
    CALLMAP(VM_EXIT, do_exit);
    CALLMAP(VM_FORK, do_fork);
    CALLMAP(VM_BRK, do_brk);
    CALLMAP(VM_WILLEXIT, do_willexit);

    CALLMAP(VM_PROCCTL, do_procctl_notrans);

    /* Calls from VFS. */
    CALLMAP(VM_VFS_REPLY, do_vfs_reply);
    CALLMAP(VM_VFS_MMAP, do_vfs_mmap);

    /* Calls from RS. */
    CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv);
    CALLMAP(VM_RS_PREPARE, do_rs_prepare);
    CALLMAP(VM_RS_UPDATE, do_rs_update);
    CALLMAP(VM_RS_MEMCTL, do_rs_memctl);

    /* Generic calls. */
    CALLMAP(VM_REMAP, do_remap);
    CALLMAP(VM_REMAP_RO, do_remap);
    CALLMAP(VM_GETPHYS, do_get_phys);
    CALLMAP(VM_SHM_UNMAP, do_munmap);
    CALLMAP(VM_GETREF, do_get_refcount);
    CALLMAP(VM_INFO, do_info);

    /* Cache blocks. */
    CALLMAP(VM_MAPCACHEPAGE, do_mapcache);
    CALLMAP(VM_SETCACHEPAGE, do_setcache);
    CALLMAP(VM_FORGETCACHEPAGE, do_forgetcache);
    CALLMAP(VM_CLEARCACHE, do_clearcache);

    /* getrusage */
    CALLMAP(VM_GETRUSAGE, do_getrusage);

    /* Mark VM instances. */
    num_vm_instances = 1;
    vmproc[VM_PROC_NR].vm_flags |= VMF_VM_INSTANCE;

    /* Let SEF know about VM mmapped regions. */
    s = sef_llvm_add_special_mem_region((void *) VM_OWN_HEAPBASE,
        VM_OWN_MMAPTOP - VM_OWN_HEAPBASE, "%MMAP_ALL");
    if (s < 0) {
        printf("VM: st_add_special_mmapped_region failed %d\n", s);
    }
}

/*===========================================================================*
 *			sef_cb_init_vm_multi_lu				     *
 *===========================================================================*/
static int sef_cb_init_vm_multi_lu(int type, sef_init_info_t *info)
{
    message m;
    int i, r;
    ipc_filter_el_t ipc_filter[IPCF_MAX_ELEMENTS];
    int num_elements;

    if (type != SEF_INIT_LU || !(info->flags & SEF_LU_MULTI)) {
        return OK;
    }

    /* If this is a multi-component update, we need to perform the update
     * for services that need to be updated. In addition, make sure VM
     * can only receive messages from RS, tasks, and other services being
     * updated until RS specifically sends a special update cancel message.
     * This is necessary to limit the number of VM state changes to support
     * rollback. Allow only safe message types for safe updates.
     */
    memset(ipc_filter, 0, sizeof(ipc_filter));
    num_elements = 0;
    ipc_filter[num_elements].flags = IPCF_MATCH_M_SOURCE;
    ipc_filter[num_elements++].m_source = RS_PROC_NR;
    if ((r = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
        (vir_bytes) rprocpub, NR_SYS_PROCS*sizeof(struct rprocpub))) != OK) {
        panic("sys_safecopyfrom failed: %d", r);
    }
    m.m_source = VM_PROC_NR;
    for (i = 0; i < NR_SYS_PROCS; i++) {
        if (rprocpub[i].in_use && rprocpub[i].old_endpoint != NONE) {
            if (num_elements <= IPCF_MAX_ELEMENTS-5) {
                /* VM_BRK is needed for normal operation during the live
                 * update. VM_INFO is needed for state transfer in the
                 * light of holes. Pagefaults and handle-memory requests
                 * are blocked intentionally, as handling these would
                 * prevent VM from being able to roll back.
                 */
                ipc_filter[num_elements].flags = IPCF_MATCH_M_SOURCE | IPCF_MATCH_M_TYPE;
                ipc_filter[num_elements].m_source = rprocpub[i].old_endpoint;
                ipc_filter[num_elements++].m_type = VM_BRK;
                ipc_filter[num_elements].flags = IPCF_MATCH_M_SOURCE | IPCF_MATCH_M_TYPE;
                ipc_filter[num_elements].m_source = rprocpub[i].new_endpoint;
                ipc_filter[num_elements++].m_type = VM_BRK;
                ipc_filter[num_elements].flags = IPCF_MATCH_M_SOURCE | IPCF_MATCH_M_TYPE;
                ipc_filter[num_elements].m_source = rprocpub[i].old_endpoint;
                ipc_filter[num_elements++].m_type = VM_INFO;
                ipc_filter[num_elements].flags = IPCF_MATCH_M_SOURCE | IPCF_MATCH_M_TYPE;
                ipc_filter[num_elements].m_source = rprocpub[i].new_endpoint;
                ipc_filter[num_elements++].m_type = VM_INFO;
                /* Make sure we can talk to any RS instance. */
                if (rprocpub[i].old_endpoint == RS_PROC_NR) {
                    ipc_filter[num_elements].flags = IPCF_MATCH_M_SOURCE;
                    ipc_filter[num_elements++].m_source = rprocpub[i].new_endpoint;
                } else if (rprocpub[i].new_endpoint == RS_PROC_NR) {
                    ipc_filter[num_elements].flags = IPCF_MATCH_M_SOURCE;
                    ipc_filter[num_elements++].m_source = rprocpub[i].old_endpoint;
                }
            } else {
                printf("sef_cb_init_vm_multi_lu: skipping ipc filter elements for %d and %d\n",
                    rprocpub[i].old_endpoint, rprocpub[i].new_endpoint);
            }
            if (rprocpub[i].sys_flags & SF_VM_UPDATE) {
                m.m_lsys_vm_update.src = rprocpub[i].new_endpoint;
                m.m_lsys_vm_update.dst = rprocpub[i].old_endpoint;
                m.m_lsys_vm_update.flags = rprocpub[i].sys_flags;
                r = do_rs_update(&m);
                if (r != OK && r != SUSPEND) {
                    printf("sef_cb_init_vm_multi_lu: do_rs_update failed: %d\n", r);
                }
            }
        }
    }

    r = sys_statectl(SYS_STATE_ADD_IPC_WL_FILTER, ipc_filter,
        num_elements * sizeof(ipc_filter_el_t));
    if (r != OK) {
        printf("sef_cb_init_vm_multi_lu: sys_statectl failed: %d\n", r);
    }

    return OK;
}

/*===========================================================================*
 *			sef_cb_init_lu_restart				     *
 *===========================================================================*/
static int sef_cb_init_lu_restart(int type, sef_init_info_t *info)
{
/* Restart the vm server. */
    int r;
    endpoint_t old_e;
    int old_p;
    struct vmproc *old_vmp, *new_vmp;

    /* Perform default state transfer first. */
    if (type == SEF_INIT_LU) {
        sef_setcb_init_restart(SEF_CB_INIT_RESTART_STATEFUL);
        r = SEF_CB_INIT_LU_DEFAULT(type, info);
    } else {
        r = SEF_CB_INIT_RESTART_STATEFUL(type, info);
    }
    if (r != OK) {
        return r;
    }

    /* Lookup slots for old process. */
    old_e = info->old_endpoint;
    if (vm_isokendpt(old_e, &old_p) != OK) {
        printf("sef_cb_init_lu_restart: bad old endpoint %d\n", old_e);
        return EINVAL;
    }
    old_vmp = &vmproc[old_p];
    new_vmp = &vmproc[VM_PROC_NR];

    /* Swap proc slots and dynamic data. */
    if ((r = swap_proc_slot(old_vmp, new_vmp)) != OK) {
        printf("sef_cb_init_lu_restart: swap_proc_slot failed\n");
        return r;
    }
    if ((r = swap_proc_dyn_data(old_vmp, new_vmp, 0)) != OK) {
        printf("sef_cb_init_lu_restart: swap_proc_dyn_data failed\n");
        return r;
    }

    /* Rebind page tables. */
    pt_bind(&new_vmp->vm_pt, new_vmp);
    pt_bind(&old_vmp->vm_pt, old_vmp);
    pt_clearmapcache();

    /* Adjust process references. */
    adjust_proc_refs();

    /* Handle multi-component live update when necessary. */
    return sef_cb_init_vm_multi_lu(type, info);
}

/*===========================================================================*
 *			sef_cb_signal_handler				     *
 *===========================================================================*/
static void sef_cb_signal_handler(int signo)
{
    /* Check for known kernel signals, ignore anything else. */
    switch (signo) {
        /* There is a pending memory request from the kernel. */
        case SIGKMEM:
            do_memory();
            break;
    }

    /* It can happen that we get stuck receiving signals
     * without sef_receive() returning. We could need more memory
     * though.
     */
    if (missing_spares > 0) {
        alloc_cycle();    /* pagetable code wants to be called */
    }

    pt_clearmapcache();
}

/*===========================================================================*
 *				map_service				     *
 *===========================================================================*/
static int map_service(struct rprocpub *rpub)
{
/* Map a new service by initializing its call mask. */
    int r, proc_nr;

    if ((r = vm_isokendpt(rpub->endpoint, &proc_nr)) != OK) {
        return r;
    }

    /* Copy the call mask. */
    acl_set(&vmproc[proc_nr], rpub->vm_call_mask, !IS_RPUB_BOOT_USR(rpub));

    return(OK);
}