#define _SYSTEM 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/keymap.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/bitmap.h>
#include <minix/crtso.h>
#include <minix/rs.h>

#include <errno.h>
#include <string.h>
#include <env.h>
#include <stdio.h>

#include <memory.h>

#define _MAIN 1
#include "glo.h"
#include "proto.h"
#include "util.h"
#include "vm.h"
#include "sanitycheck.h"

extern int missing_spares;

#include <archtypes.h>
#include "../../kernel/const.h"
#include "../../kernel/config.h"
#include "../../kernel/proc.h"
/* Table of calls and a macro to test for being in range. */
struct {
	int (*vmc_func)(message *);	/* Call handles message. */
	char *vmc_name;			/* Human-readable string. */
} vm_calls[NR_VM_CALLS];

/* Macro to verify call range and map 'high' range to 'base' range
 * (starting at 0) in one. Evaluates to the zero-based call number if the
 * call number is valid, -1 otherwise.
 */
#define CALLNUMBER(c) (((c) >= VM_RQ_BASE && 				\
			(c) < VM_RQ_BASE + ELEMENTS(vm_calls)) ?	\
			((c) - VM_RQ_BASE) : -1)
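
/* Example: CALLNUMBER(VM_RQ_BASE + 2) evaluates to 2, while any message type
 * outside [VM_RQ_BASE, VM_RQ_BASE + NR_VM_CALLS) evaluates to -1.
 */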
FORWARD _PROTOTYPE(int map_service, (struct rprocpub *rpub));
FORWARD _PROTOTYPE(int vm_acl_ok, (endpoint_t caller, int call));

extern int unmap_ok;

/* SEF functions and variables. */
FORWARD _PROTOTYPE( void sef_local_startup, (void) );
FORWARD _PROTOTYPE( int sef_cb_init_fresh, (int type, sef_init_info_t *info) );
/*===========================================================================*
 *				main					     *
 *===========================================================================*/
PUBLIC int main(void)
{
	message msg;
	int result, who_e;

	/* SEF local startup. */
	sef_local_startup();

	/* This is VM's main loop. */
	while (TRUE) {
		int r, c;

		SANITYCHECK(SCL_TOP);
		if(missing_spares > 0) {
			pt_cycle();	/* pagetable code wants to be called */
		}
		SANITYCHECK(SCL_DETAIL);

		if ((r=sef_receive(ANY, &msg)) != OK)
			panic("sef_receive() error: %d", r);

		SANITYCHECK(SCL_DETAIL);

		if(msg.m_type & NOTIFY_MESSAGE) {
			switch(msg.m_source) {
			case SYSTEM:
				/* Kernel wants to have memory ranges
				 * verified, and/or pagefaults handled.
				 */
				do_memory();
				break;
			case HARDWARE:
				do_pagefaults();
				break;
			case PM_PROC_NR:
				/* PM sends a notify() on shutdown, which
				 * is OK and we ignore.
				 */
				break;
			default:
				/* No-one else should send us notifies. */
				printf("VM: ignoring notify() from %d\n",
					msg.m_source);
				break;
			}
			continue;
		}

		who_e = msg.m_source;
		c = CALLNUMBER(msg.m_type);
		result = ENOSYS; /* Out of range or restricted calls return this. */
		if(c < 0 || !vm_calls[c].vmc_func) {
			printf("VM: out of range or missing callnr %d from %d\n",
				msg.m_type, who_e);
		} else if (vm_acl_ok(who_e, c) != OK) {
			printf("VM: unauthorized %s by %d\n",
				vm_calls[c].vmc_name, who_e);
		} else {
			SANITYCHECK(SCL_FUNCTIONS);
			result = vm_calls[c].vmc_func(&msg);
			SANITYCHECK(SCL_FUNCTIONS);
		}

		/* Send reply message, unless the return code is SUSPEND,
		 * which is a pseudo-result suppressing the reply message.
		 */
		if(result != SUSPEND) {
			SANITYCHECK(SCL_DETAIL);
			msg.m_type = result;
			if((r=send(who_e, &msg)) != OK) {
				printf("VM: couldn't send %d to %d (err %d)\n",
					msg.m_type, who_e, r);
				panic("send() error");
			}
			SANITYCHECK(SCL_DETAIL);
		}
		SANITYCHECK(SCL_DETAIL);
	}
	return(OK);
}
/*===========================================================================*
 *			    sef_local_startup				     *
 *===========================================================================*/
PRIVATE void sef_local_startup()
{
	/* Register init callbacks. */
	sef_setcb_init_fresh(sef_cb_init_fresh);
	sef_setcb_init_restart(sef_cb_init_restart_fail);

	/* No live update support for now. */

	/* Let SEF perform startup. */
	sef_startup();
}
/*===========================================================================*
 *			     sef_cb_init_fresh				     *
 *===========================================================================*/
PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
{
/* Initialize the vm server. */
	int s, i;
	int click, clicksforgotten = 0;
	struct memory mem_chunks[NR_MEMS];
	struct boot_image image[NR_BOOT_PROCS];
	struct boot_image *ip;
	struct rprocpub rprocpub[NR_BOOT_PROCS];
	phys_bytes limit = 0;

#if SANITYCHECKS
	incheck = nocheck = 0;
	FIXME("VM SANITYCHECKS are on");
#endif

	vm_paged = 1;
	env_parse("vm_paged", "d", 0, &vm_paged, 0, 1);
#if SANITYCHECKS
	env_parse("vm_sanitychecklevel", "d", 0, &vm_sanitychecklevel, 0, SCL_MAX);
#endif

	/* Get chunks of available memory. */
	get_mem_chunks(mem_chunks);

	/* Initialize VM's process table. Request a copy of the system
	 * image table that is defined at the kernel level to see which
	 * slots to fill in.
	 */
	if (OK != (s=sys_getimage(image)))
		panic("couldn't get image table: %d", s);

	/* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
	memset(vmproc, 0, sizeof(vmproc));

	for(i = 0; i < ELEMENTS(vmproc); i++) {
		vmproc[i].vm_slot = i;
	}

	/* Walk through boot-time system processes that are alive
	 * now and make valid slot entries for them.
	 */
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		phys_bytes proclimit;
		struct vmproc *vmp;

		if(ip->proc_nr >= _NR_PROCS) { panic("proc: %d", ip->proc_nr); }
		if(ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;
#define GETVMP(v, nr)						\
		if(nr >= 0) {					\
			vmp = &vmproc[ip->proc_nr];		\
		} else if(nr == SYSTEM) {			\
			vmp = &vmproc[VMP_SYSTEM];		\
		} else {					\
			panic("init: crazy proc_nr: %d", nr);	\
		}
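
		/* Note: GETVMP ignores its 'v' argument and always assigns to
		 * the local 'vmp'; a regular process number selects its own
		 * slot, while SYSTEM gets the dedicated VMP_SYSTEM slot.
		 */
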
		/* Initialize normal process table slot or special SYSTEM
		 * table slot. Kernel memory is already reserved.
		 */
		GETVMP(vmp, ip->proc_nr);

		/* Reset fields as if exited. */
		clear_proc(vmp);

		/* Get memory map for this process from the kernel. */
		if ((s=get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != OK)
			panic("couldn't get process mem_map: %d", s);

		/* Remove this memory from the free list. */
		reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);

		/* Set memory limit. */
		proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
			vmp->vm_arch.vm_seg[S].mem_len) - 1;

		if(proclimit > limit)
			limit = proclimit;

		vmp->vm_flags = VMF_INUSE;
		vmp->vm_endpoint = ip->endpoint;
		vmp->vm_stacktop =
			CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len);

		if (vmp->vm_arch.vm_seg[T].mem_len != 0)
			vmp->vm_flags |= VMF_SEPARATE;
	}

	/* Architecture-dependent initialization. */
	pt_init(limit);

	/* Initialize tables to all physical memory. */
	mem_init(mem_chunks);
	meminit_done = 1;

	/* Give these processes their own page table. */
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		int s;
		struct vmproc *vmp;
		vir_bytes old_stacktop, old_stack;

		if(ip->proc_nr < 0) continue;

		GETVMP(vmp, ip->proc_nr);

		if(!(ip->flags & PROC_FULLVM))
			continue;

		old_stack =
			vmp->vm_arch.vm_seg[S].mem_vir +
			vmp->vm_arch.vm_seg[S].mem_len -
			vmp->vm_arch.vm_seg[D].mem_len;

		if(pt_new(&vmp->vm_pt) != OK)
			panic("VM: no new pagetable");

#define BASICSTACK VM_PAGE_SIZE
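		/* BASICSTACK (one page) is used below as the boot process's
		 * initial stack size in the proc_new() call; the data-to-stack
		 * gap is reduced by the same amount.
		 */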
		old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
			vmp->vm_arch.vm_seg[S].mem_len);
		if(sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
			VM_STACKTOP - old_stacktop) != OK) {
			panic("VM: vmctl for new stack failed");
		}

		FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
			vmp->vm_arch.vm_seg[D].mem_len,
			old_stack);

		if(proc_new(vmp,
			VM_PROCSTART,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
			BASICSTACK,
			CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len -
				vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
			VM_STACKTOP) != OK) {
			panic("failed proc_new for boot process");
		}
	}

	/* Set up table of calls. */
#define CALLMAP(code, func) { int i;					\
	if((i=CALLNUMBER(code)) < 0) { panic(#code " invalid: %d", (code)); } \
	if(i >= NR_VM_CALLS) { panic(#code " invalid: %d", (code)); }	\
	vm_calls[i].vmc_func = (func);					\
	vm_calls[i].vmc_name = #code;					\
}
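
	/* CALLMAP wraps its body in a brace block with a local index so it
	 * can be used like a statement; e.g. CALLMAP(VM_MMAP, do_mmap) stores
	 * do_mmap and the string "VM_MMAP" at slot CALLNUMBER(VM_MMAP).
	 */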
	/* Set call table to 0. This invalidates all calls (clear
	 * vmc_func).
	 */
	memset(vm_calls, 0, sizeof(vm_calls));

	/* Basic VM calls. */
	CALLMAP(VM_MMAP, do_mmap);
	CALLMAP(VM_MUNMAP, do_munmap);
	CALLMAP(VM_MUNMAP_TEXT, do_munmap);
	CALLMAP(VM_MAP_PHYS, do_map_phys);
	CALLMAP(VM_UNMAP_PHYS, do_unmap_phys);

	/* Calls from PM. */
	CALLMAP(VM_EXIT, do_exit);
	CALLMAP(VM_FORK, do_fork);
	CALLMAP(VM_BRK, do_brk);
	CALLMAP(VM_EXEC_NEWMEM, do_exec_newmem);
	CALLMAP(VM_PUSH_SIG, do_push_sig);
	CALLMAP(VM_WILLEXIT, do_willexit);
	CALLMAP(VM_ADDDMA, do_adddma);
	CALLMAP(VM_DELDMA, do_deldma);
	CALLMAP(VM_GETDMA, do_getdma);
	CALLMAP(VM_NOTIFY_SIG, do_notify_sig);

	/* Calls from RS. */
	CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv);

	/* Generic calls. */
	CALLMAP(VM_REMAP, do_remap);
	CALLMAP(VM_GETPHYS, do_get_phys);
	CALLMAP(VM_SHM_UNMAP, do_shared_unmap);
	CALLMAP(VM_GETREF, do_get_refcount);
	CALLMAP(VM_INFO, do_info);
	CALLMAP(VM_QUERY_EXIT, do_query_exit);

	/* Sanity checks. */
	if(find_kernel_top() >= VM_PROCSTART)
		panic("kernel loaded too high");

	/* Initialize the structures for queryexit. */
	init_query_exit();

	/* Unmap our own low pages. */
	unmap_ok = 1;
	_minix_unmapzero();

	/* Map all the services in the boot image. */
	if((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
		(vir_bytes) rprocpub, sizeof(rprocpub), S)) != OK) {
		panic("sys_safecopyfrom failed: %d", s);
	}
	for(i=0; i < NR_BOOT_PROCS; i++) {
		if(rprocpub[i].in_use) {
			if((s = map_service(&rprocpub[i])) != OK) {
				panic("unable to map service: %d", s);
			}
		}
	}

	return(OK);
}
/*===========================================================================*
 *				map_service				     *
 *===========================================================================*/
PRIVATE int map_service(struct rprocpub *rpub)
{
/* Map a new service by initializing its call mask. */
	int r, proc_nr;

	if ((r = vm_isokendpt(rpub->endpoint, &proc_nr)) != OK) {
		return r;
	}

	/* Copy the call mask. */
	memcpy(&vmproc[proc_nr].vm_call_mask, &rpub->vm_call_mask,
		sizeof(vmproc[proc_nr].vm_call_mask));

	return(OK);
}
/*===========================================================================*
 *				vm_acl_ok				     *
 *===========================================================================*/
PRIVATE int vm_acl_ok(endpoint_t caller, int call)
{
	int n, r;

	if ((r = vm_isokendpt(caller, &n)) != OK)
		panic("VM: from strange source: %d", caller);

	/* See if the call is allowed. */
	if (!GET_BIT(vmproc[n].vm_call_mask, call)) {
		return EPERM;
	}

	return OK;
}