/* servers/vm/main.c */
#define _POSIX_SOURCE 1
#define _MINIX 1
#define _SYSTEM 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/keymap.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/const.h>
#include <minix/bitmap.h>
#include <minix/crtso.h>
#include <minix/rs.h>

#include <errno.h>
#include <string.h>
#include <env.h>
#include <stdio.h>
#include <assert.h>

#include <memory.h>

#define _MAIN 1
#include "glo.h"
#include "proto.h"
#include "util.h"
#include "vm.h"
#include "sanitycheck.h"

extern int missing_spares;

#include <machine/archtypes.h>
#include "kernel/const.h"
#include "kernel/config.h"
#include "kernel/proc.h"

#include <signal.h>

/* Table of calls and a macro to test for being in range. */
struct {
  int (*vmc_func)(message *);	/* Call handles message. */
  char *vmc_name;		/* Human-readable string. */
} vm_calls[NR_VM_CALLS];
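
/* Each slot in vm_calls[] is filled in by the CALLMAP() macro during
 * sef_cb_init_fresh(); a slot whose vmc_func is still NULL means the
 * corresponding call number is not implemented and is rejected by the
 * main loop.
 */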

/* Macro to verify call range and map 'high' range to 'base' range
 * (starting at 0) in one. Evaluates to zero-based call number if call
 * number is valid, returns -1 otherwise.
 */
#define CALLNUMBER(c) (((c) >= VM_RQ_BASE && \
	(c) < VM_RQ_BASE + ELEMENTS(vm_calls)) ? \
	((c) - VM_RQ_BASE) : -1)
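
/* Illustration (the real value of VM_RQ_BASE comes from <minix/com.h>;
 * 0xC00 below is only an assumed example): with VM_RQ_BASE == 0xC00, a
 * request with m_type 0xC05 gives CALLNUMBER() == 5 and is dispatched
 * through vm_calls[5], while any m_type outside
 * [VM_RQ_BASE, VM_RQ_BASE + NR_VM_CALLS) evaluates to -1 and is rejected.
 */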

FORWARD _PROTOTYPE(int map_service, (struct rprocpub *rpub));
FORWARD _PROTOTYPE(int vm_acl_ok, (endpoint_t caller, int call));

extern int unmap_ok;

/* SEF functions and variables. */
FORWARD _PROTOTYPE( void sef_local_startup, (void) );
FORWARD _PROTOTYPE( int sef_cb_init_fresh, (int type, sef_init_info_t *info) );
FORWARD _PROTOTYPE( void sef_cb_signal_handler, (int signo) );

/*===========================================================================*
 *				main					     *
 *===========================================================================*/
PUBLIC int main(void)
{
  message msg;
  int result, who_e, rcv_sts;
  sigset_t sigset;

  /* SEF local startup. */
  sef_local_startup();

  SANITYCHECK(SCL_TOP);

  /* This is VM's main loop. */
  while (TRUE) {
    int r, c;

    SANITYCHECK(SCL_TOP);
    if (missing_spares > 0) {
      pt_cycle();	/* pagetable code wants to be called */
    }

    SANITYCHECK(SCL_DETAIL);

    if ((r = sef_receive_status(ANY, &msg, &rcv_sts)) != OK)
      panic("sef_receive_status() error: %d", r);

    SANITYCHECK(SCL_DETAIL);

    if (is_ipc_notify(rcv_sts)) {
      /* Unexpected notify(). */
      printf("VM: ignoring notify() from %d\n", msg.m_source);
      continue;
    }
    who_e = msg.m_source;
    c = CALLNUMBER(msg.m_type);
    result = ENOSYS;	/* Out of range or restricted calls return this. */
    if (msg.m_type == VM_PAGEFAULT) {
      if (!IPC_STATUS_FLAGS_TEST(rcv_sts, IPC_FLG_MSG_FROM_KERNEL)) {
        printf("VM: process %d faked VM_PAGEFAULT message!\n",
          msg.m_source);
      }
      do_pagefaults(&msg);
      /* Do not reply to this call; on success the caller is unblocked
       * by a sys_vmctl() call in do_pagefaults(), and VM panics
       * otherwise.
       */
      continue;
    } else if (c < 0 || !vm_calls[c].vmc_func) {
      printf("VM: out of range or missing callnr %d from %d\n",
        msg.m_type, who_e);
    } else {
      if (vm_acl_ok(who_e, c) != OK) {
        printf("VM: unauthorized %s by %d\n",
          vm_calls[c].vmc_name, who_e);
      } else {
        SANITYCHECK(SCL_FUNCTIONS);
        result = vm_calls[c].vmc_func(&msg);
        SANITYCHECK(SCL_FUNCTIONS);
      }
    }

    /* Send reply message, unless the return code is SUSPEND,
     * which is a pseudo-result suppressing the reply message.
     */
    if (result != SUSPEND) {
      SANITYCHECK(SCL_DETAIL);
      msg.m_type = result;
      if ((r = send(who_e, &msg)) != OK) {
        printf("VM: couldn't send %d to %d (err %d)\n",
          msg.m_type, who_e, r);
        panic("send() error");
      }
      SANITYCHECK(SCL_DETAIL);
    }
    SANITYCHECK(SCL_DETAIL);
  }
  return(OK);
}

/*===========================================================================*
 *			sef_local_startup				     *
 *===========================================================================*/
PRIVATE void sef_local_startup()
{
  /* Register init callbacks. */
  sef_setcb_init_fresh(sef_cb_init_fresh);
  sef_setcb_init_restart(sef_cb_init_fail);

  /* No live update support for now. */

  /* Register signal callbacks. */
  sef_setcb_signal_handler(sef_cb_signal_handler);

  /* Let SEF perform startup. */
  sef_startup();
}

/*===========================================================================*
 *			sef_cb_init_fresh				     *
 *===========================================================================*/
PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
{
/* Initialize the vm server. */
  int s, i;
  int click, clicksforgotten = 0;
  struct memory mem_chunks[NR_MEMS];
  struct boot_image image[NR_BOOT_PROCS];
  struct boot_image *ip;
  struct rprocpub rprocpub[NR_BOOT_PROCS];
  phys_bytes limit = 0;

#if SANITYCHECKS
  incheck = nocheck = 0;
#endif

  vm_paged = 1;
  env_parse("vm_paged", "d", 0, &vm_paged, 0, 1);
#if SANITYCHECKS
  env_parse("vm_sanitychecklevel", "d", 0, &vm_sanitychecklevel, 0, SCL_MAX);
#endif

  /* Get chunks of available memory. */
  get_mem_chunks(mem_chunks);

  /* Initialize VM's process table. Request a copy of the system
   * image table that is defined at the kernel level to see which
   * slots to fill in.
   */
  if (OK != (s = sys_getimage(image)))
    panic("couldn't get image table: %d", s);

  /* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
  memset(vmproc, 0, sizeof(vmproc));

  for (i = 0; i < ELEMENTS(vmproc); i++) {
    vmproc[i].vm_slot = i;
  }

  /* Walk through boot-time system processes that are alive
   * now and make valid slot entries for them.
   */
  for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
    phys_bytes proclimit;
    struct vmproc *vmp;

    if (ip->proc_nr >= _NR_PROCS) { panic("proc: %d", ip->proc_nr); }
    if (ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;

#define GETVMP(v, nr) \
    if (nr >= 0) { \
      vmp = &vmproc[ip->proc_nr]; \
    } else if (nr == SYSTEM) { \
      vmp = &vmproc[VMP_SYSTEM]; \
    } else { \
      panic("init: crazy proc_nr: %d", nr); \
    }

    /* Initialize normal process table slot or special SYSTEM
     * table slot. Kernel memory is already reserved.
     */
    GETVMP(vmp, ip->proc_nr);

    /* reset fields as if exited */
    clear_proc(vmp);

    /* Get memory map for this process from the kernel. */
    if ((s = get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != OK)
      panic("couldn't get process mem_map: %d", s);

    /* Remove this memory from the free list. */
    reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);

    /* Set memory limit. */
    proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
      vmp->vm_arch.vm_seg[S].mem_len) - 1;

    if (proclimit > limit)
      limit = proclimit;

    vmp->vm_flags = VMF_INUSE;
    vmp->vm_endpoint = ip->endpoint;
    vmp->vm_stacktop =
      CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
        vmp->vm_arch.vm_seg[S].mem_len);

    if (vmp->vm_arch.vm_seg[T].mem_len != 0)
      vmp->vm_flags |= VMF_SEPARATE;
  }

  /* Architecture-dependent initialization. */
  pt_init(limit);

  /* Initialize tables to all physical memory. */
  mem_init(mem_chunks);
  meminit_done = 1;

  /* Give these processes their own page table. */
  for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
    int s;
    struct vmproc *vmp;
    vir_bytes old_stacktop, old_stack;

    if (ip->proc_nr < 0) continue;

    GETVMP(vmp, ip->proc_nr);

    if (!(ip->flags & PROC_FULLVM))
      continue;
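
    /* The code below rebuilds this boot process on a page table of its
     * own: it computes how many clicks the old gap-plus-stack region
     * occupied (old_stack), creates a fresh page table, asks the kernel
     * via VMCTL_INCSP to shift the saved stack pointer so the stack
     * ends up just below VM_STACKTOP, returns the physical memory
     * behind the old stack region to the free list, and finally lays
     * the process out again with proc_new(). This is only a summary of
     * the statements that follow; the precise semantics of VMCTL_INCSP
     * and the proc_new() parameter order are defined elsewhere in the
     * VM and kernel sources.
     */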

    old_stack =
      vmp->vm_arch.vm_seg[S].mem_vir +
      vmp->vm_arch.vm_seg[S].mem_len -
      vmp->vm_arch.vm_seg[D].mem_len;

    if (pt_new(&vmp->vm_pt) != OK)
      panic("VM: no new pagetable");
#define BASICSTACK VM_PAGE_SIZE
    old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
      vmp->vm_arch.vm_seg[S].mem_len);
    if (sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
      VM_STACKTOP - old_stacktop) != OK) {
      panic("VM: vmctl for new stack failed");
    }

    free_mem(vmp->vm_arch.vm_seg[D].mem_phys +
      vmp->vm_arch.vm_seg[D].mem_len,
      old_stack);

    if (proc_new(vmp,
      VM_PROCSTART,
      CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
      CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
      BASICSTACK,
      CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
        vmp->vm_arch.vm_seg[S].mem_len -
        vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
      CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
      CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
      VM_STACKTOP, 0) != OK) {
      panic("failed proc_new for boot process");
    }
  }

  /* Set up table of calls. */
#define CALLMAP(code, func) { int i; \
  if ((i = CALLNUMBER(code)) < 0) { panic(#code " invalid: %d", (code)); } \
  if (i >= NR_VM_CALLS) { panic(#code " invalid: %d", (code)); } \
  vm_calls[i].vmc_func = (func); \
  vm_calls[i].vmc_name = #code; \
}
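
/* CALLMAP() binds one VM_* request code to its handler and records the
 * code's name for diagnostics; the CALLNUMBER() and NR_VM_CALLS checks
 * turn a bad or out-of-range code into a panic at initialization time
 * rather than a silently misrouted call at run time.
 */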

  /* Set call table to 0. This invalidates all calls (clear
   * vmc_func).
   */
  memset(vm_calls, 0, sizeof(vm_calls));

  /* Basic VM calls. */
  CALLMAP(VM_MMAP, do_mmap);
  CALLMAP(VM_MUNMAP, do_munmap);
  CALLMAP(VM_MUNMAP_TEXT, do_munmap);
  CALLMAP(VM_MAP_PHYS, do_map_phys);
  CALLMAP(VM_UNMAP_PHYS, do_unmap_phys);

  /* Calls from PM. */
  CALLMAP(VM_EXIT, do_exit);
  CALLMAP(VM_FORK, do_fork);
  CALLMAP(VM_BRK, do_brk);
  CALLMAP(VM_EXEC_NEWMEM, do_exec_newmem);
  CALLMAP(VM_PUSH_SIG, do_push_sig);
  CALLMAP(VM_WILLEXIT, do_willexit);
  CALLMAP(VM_ADDDMA, do_adddma);
  CALLMAP(VM_DELDMA, do_deldma);
  CALLMAP(VM_GETDMA, do_getdma);
  CALLMAP(VM_NOTIFY_SIG, do_notify_sig);

  /* Calls from RS. */
  CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv);
  CALLMAP(VM_RS_UPDATE, do_rs_update);

  /* Generic calls. */
  CALLMAP(VM_REMAP, do_remap);
  CALLMAP(VM_GETPHYS, do_get_phys);
  CALLMAP(VM_SHM_UNMAP, do_shared_unmap);
  CALLMAP(VM_GETREF, do_get_refcount);
  CALLMAP(VM_INFO, do_info);
  CALLMAP(VM_QUERY_EXIT, do_query_exit);

  /* Sanity checks. */
  if (find_kernel_top() >= VM_PROCSTART)
    panic("kernel loaded too high");

  /* Initialize the structures for queryexit. */
  init_query_exit();

  /* Unmap our own low pages. */
  unmap_ok = 1;
  _minix_unmapzero();

  /* Map all the services in the boot image. */
  if ((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
    (vir_bytes) rprocpub, sizeof(rprocpub), S)) != OK) {
    panic("sys_safecopyfrom failed: %d", s);
  }
  for (i = 0; i < NR_BOOT_PROCS; i++) {
    if (rprocpub[i].in_use) {
      if ((s = map_service(&rprocpub[i])) != OK) {
        panic("unable to map service: %d", s);
      }
    }
  }

  return(OK);
}

/*===========================================================================*
 *			sef_cb_signal_handler				     *
 *===========================================================================*/
PRIVATE void sef_cb_signal_handler(int signo)
{
  /* Check for known kernel signals, ignore anything else. */
  switch (signo) {
    /* There is a pending memory request from the kernel. */
    case SIGKMEM:
      do_memory();
      break;
  }
}

/*===========================================================================*
 *				map_service				     *
 *===========================================================================*/
PRIVATE int map_service(rpub)
struct rprocpub *rpub;
{
/* Map a new service by initializing its call mask. */
  int r, proc_nr;

  if ((r = vm_isokendpt(rpub->endpoint, &proc_nr)) != OK) {
    return r;
  }

  /* Copy the call mask. */
  memcpy(&vmproc[proc_nr].vm_call_mask, &rpub->vm_call_mask,
    sizeof(vmproc[proc_nr].vm_call_mask));

  return(OK);
}
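
/* The call mask copied in map_service() is a per-process bitmap indexed
 * by zero-based VM call number; vm_acl_ok() below tests it with
 * GET_BIT() before main() dispatches a request on the caller's behalf.
 */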

/*===========================================================================*
 *				vm_acl_ok				     *
 *===========================================================================*/
PRIVATE int vm_acl_ok(endpoint_t caller, int call)
{
  int n, r;

  if ((r = vm_isokendpt(caller, &n)) != OK)
    panic("VM: from strange source: %d", caller);

  /* See if the call is allowed. */
  if (!GET_BIT(vmproc[n].vm_call_mask, call)) {
    return EPERM;
  }

  return OK;
}