#define _POSIX_SOURCE 1

#include <minix/callnr.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/endpoint.h>
#include <minix/keymap.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/bitmap.h>
#include <minix/crtso.h>

#include "sanitycheck.h"

extern int missing_spares;

#include <machine/archtypes.h>
#include <sys/param.h>
#include "kernel/const.h"
#include "kernel/config.h"
#include "kernel/proc.h"

/* Table of calls and a macro to test for being in range. */
static struct {
	int (*vmc_func)(message *);	/* Call handles message. */
	char *vmc_name;			/* Human-readable string. */
} vm_calls[NR_VM_CALLS];

/* Macro to verify call range and map 'high' range to 'base' range
 * (starting at 0) in one. Evaluates to the zero-based call number if the
 * call number is valid, and to -1 otherwise.
 */
#define CALLNUMBER(c) (((c) >= VM_RQ_BASE && \
			(c) < VM_RQ_BASE + ELEMENTS(vm_calls)) ? \
			((c) - VM_RQ_BASE) : -1)
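
/* For example: a request code equal to VM_RQ_BASE maps to slot 0 of
 * vm_calls[], VM_RQ_BASE+1 to slot 1, and so on; any code outside the
 * table evaluates to -1.
 */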

static int map_service(struct rprocpub *rpub);
static int vm_acl_ok(endpoint_t caller, int call);
static int do_rs_init(message *m);

/* SEF functions and variables. */
static void sef_cb_signal_handler(int signo);

/*===========================================================================*
 *				main					     *
 *===========================================================================*/
int main(void)
{
	message msg;
	int result, who_e, rcv_sts;
	int caller_slot;

	/* Initialize system so that all processes are runnable. */
	init_vm();

	/* Register init callbacks. */
	sef_setcb_init_restart(sef_cb_init_fail);
	sef_setcb_signal_handler(sef_cb_signal_handler);

	/* Let SEF perform startup. */
	sef_startup();

	/* This is VM's main loop. */
	while (TRUE) {
		int r, c;

		if(missing_spares > 0) {
			pt_cycle();	/* pagetable code wants to be called */
		}

		if ((r = sef_receive_status(ANY, &msg, &rcv_sts)) != OK)
			panic("sef_receive_status() error: %d", r);

		if (is_ipc_notify(rcv_sts)) {
			/* Unexpected notify(). */
			printf("VM: ignoring notify() from %d\n", msg.m_source);
			continue;
		}

		who_e = msg.m_source;
		if(vm_isokendpt(who_e, &caller_slot) != OK)
			panic("invalid caller %d", who_e);
		c = CALLNUMBER(msg.m_type);
		result = ENOSYS;	/* Out of range or restricted calls return this. */

		if(msg.m_type == RS_INIT && msg.m_source == RS_PROC_NR) {
			result = do_rs_init(&msg);
		} else if (msg.m_type == VM_PAGEFAULT) {
			if (!IPC_STATUS_FLAGS_TEST(rcv_sts, IPC_FLG_MSG_FROM_KERNEL)) {
				printf("VM: process %d faked VM_PAGEFAULT "
					"message!\n", msg.m_source);
			}
			do_pagefaults(&msg);
			/* Do not reply to this call; the caller is unblocked by
			 * a sys_vmctl() call in do_pagefaults on success. VM
			 * panics otherwise.
			 */
			continue;
		} else if(c < 0 || !vm_calls[c].vmc_func) {
			/* Out of range or missing callnr. */
		} else {
			if (vm_acl_ok(who_e, c) != OK) {
				printf("VM: unauthorized %s by %d\n",
					vm_calls[c].vmc_name, who_e);
			} else {
				SANITYCHECK(SCL_FUNCTIONS);
				result = vm_calls[c].vmc_func(&msg);
				SANITYCHECK(SCL_FUNCTIONS);
			}
		}

		/* Send a reply message, unless the return code is SUSPEND,
		 * which is a pseudo-result suppressing the reply message.
		 */
		if(result != SUSPEND) {
			msg.m_type = result;
			if((r = send(who_e, &msg)) != OK) {
				printf("VM: couldn't send %d to %d (err %d)\n",
					msg.m_type, who_e, r);
				panic("send() error");
			}
		}
	}

	return(OK);
}

static int do_rs_init(message *m)
{
	int s, i;
	static struct rprocpub rprocpub[NR_BOOT_PROCS];

	/* Map all the services in the boot image. */
	if((s = sys_safecopyfrom(RS_PROC_NR, m->RS_INIT_RPROCTAB_GID, 0,
		(vir_bytes) rprocpub, sizeof(rprocpub))) != OK) {
		panic("vm: sys_safecopyfrom (rs) failed: %d", s);
	}

	for(i = 0; i < NR_BOOT_PROCS; i++) {
		if(rprocpub[i].in_use) {
			if((s = map_service(&rprocpub[i])) != OK) {
				panic("unable to map service: %d", s);
			}
		}
	}

	/* RS expects this response, which it then again wants to reply to: */
	m->RS_INIT_RESULT = OK;
	sendrec(RS_PROC_NR, m);
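
	/* The reply to RS has already been sent with sendrec() above;
	 * returning SUSPEND keeps the main loop from sending a second,
	 * generic reply (see the SUSPEND check in main()).
	 */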

	return(SUSPEND);
}

struct vmproc *init_proc(endpoint_t ep_nr)
{
	struct vmproc *vmp;
	static struct boot_image *ip;

	for (ip = &kernel_boot_info.boot_procs[0];
		ip < &kernel_boot_info.boot_procs[NR_BOOT_PROCS]; ip++) {

		if(ip->proc_nr != ep_nr) continue;

		if(ip->proc_nr >= _NR_PROCS || ip->proc_nr < 0)
			panic("proc: %d", ip->proc_nr);

		vmp = &vmproc[ip->proc_nr];
		assert(!(vmp->vm_flags & VMF_INUSE));	/* no double procs */

		vmp->vm_flags = VMF_INUSE;
		vmp->vm_endpoint = ip->endpoint;

		return vmp;
	}

	panic("no init_proc");
}

struct vm_exec_info {
	struct exec_info execi;
	struct boot_image *ip;
	struct vmproc *vmp;
};

static int libexec_copy_physcopy(struct exec_info *execi,
	off_t off, off_t vaddr, size_t len)
{
	vir_bytes end;
	struct vm_exec_info *ei = execi->opaque;

	end = ei->ip->start_addr + ei->ip->len;
	assert(ei->ip->start_addr + off + len <= end);

	return sys_physcopy(NONE, ei->ip->start_addr + off,
		execi->proc_e, vaddr, len);
}

static void boot_alloc(struct exec_info *execi, off_t vaddr,
	size_t len, int flags)
{
	struct vmproc *vmp = ((struct vm_exec_info *) execi->opaque)->vmp;

	if(!(map_page_region(vmp, vaddr, 0, len, MAP_NONE,
		VR_ANON | VR_WRITABLE | VR_UNINITIALIZED, flags))) {
		panic("VM: exec: map_page_region for boot process failed");
	}
}

static int libexec_alloc_vm_prealloc(struct exec_info *execi,
	off_t vaddr, size_t len)
{
	boot_alloc(execi, vaddr, len, MF_PREALLOC);
	return OK;
}

static int libexec_alloc_vm_ondemand(struct exec_info *execi,
	off_t vaddr, size_t len)
{
	boot_alloc(execi, vaddr, len, 0);
	return OK;
}
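
/* Both wrappers funnel into boot_alloc() above; as their names suggest, the
 * only difference is the flags argument passed to map_page_region():
 * MF_PREALLOC asks for the anonymous, writable region to be backed by
 * physical pages immediately, while 0 leaves it to be populated on demand.
 */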

void exec_bootproc(struct vmproc *vmp, struct boot_image *ip)
{
	struct vm_exec_info vmexeci;
	struct exec_info *execi = &vmexeci.execi;
	char hdr[VM_PAGE_SIZE];

	memset(&vmexeci, 0, sizeof(vmexeci));

	if(pt_new(&vmp->vm_pt) != OK)
		panic("VM: no new pagetable");

	if(pt_bind(&vmp->vm_pt, vmp) != OK)
		panic("VM: pt_bind failed");

	if(sys_physcopy(NONE, ip->start_addr, SELF,
		(vir_bytes) hdr, sizeof(hdr)) != OK)
		panic("can't look at boot proc header");

	execi->stack_high = kernel_boot_info.user_sp;
	execi->stack_size = DEFAULT_STACK_LIMIT;
	execi->proc_e = vmp->vm_endpoint;
	execi->hdr = hdr;
	execi->hdr_len = sizeof(hdr);
	strlcpy(execi->progname, ip->proc_name, sizeof(execi->progname));
	execi->frame_len = 0;
	execi->opaque = &vmexeci;

	vmexeci.ip = ip;
	vmexeci.vmp = vmp;

	/* Callback functions and data. */
	execi->copymem = libexec_copy_physcopy;
	execi->clearproc = NULL;
	execi->clearmem = libexec_clear_sys_memset;
	execi->allocmem_prealloc = libexec_alloc_vm_prealloc;
	execi->allocmem_ondemand = libexec_alloc_vm_ondemand;

	if(libexec_load_elf(execi) != OK)
		panic("vm: boot process load of %d failed\n", vmp->vm_endpoint);

	if(sys_exec(vmp->vm_endpoint, (char *) execi->stack_high - 12,
		(char *) ip->proc_name, execi->pc) != OK)
		panic("vm: boot process exec of %d failed\n", vmp->vm_endpoint);
}

void init_vm(void)
{
	int s, i;
	struct vmproc *vmp;
	static struct memory mem_chunks[NR_MEMS];
	static struct boot_image *ip;

	incheck = nocheck = 0;

	/* Retrieve various crucial boot parameters. */
	if(OK != (s = sys_getkinfo(&kernel_boot_info))) {
		panic("couldn't get bootinfo: %d", s);
	}

	assert(kernel_boot_info.mmap_size > 0);
	assert(kernel_boot_info.mods_with_kernel > 0);

	env_parse("vm_sanitychecklevel", "d", 0, &vm_sanitychecklevel, 0, SCL_MAX);

	/* Get chunks of available memory. */
	get_mem_chunks(mem_chunks);

	/* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
	memset(vmproc, 0, sizeof(vmproc));

	for(i = 0; i < ELEMENTS(vmproc); i++) {
		vmproc[i].vm_slot = i;
	}

	/* Region management initialization. */
	map_region_init();

	/* Architecture-dependent initialization. */
	init_proc(VM_PROC_NR);
	pt_init();

	/* Initialize tables to all physical memory. */
	mem_init(mem_chunks);

	/* Give these processes their own page table. */
	for (ip = &kernel_boot_info.boot_procs[0];
		ip < &kernel_boot_info.boot_procs[NR_BOOT_PROCS]; ip++) {

		if(ip->proc_nr < 0) continue;

		assert(ip->start_addr);

		/* VM has already been set up by the kernel and pt_init().
		 * Any other boot process is already in memory and is set up
		 * here.
		 */
		if(ip->proc_nr == VM_PROC_NR) continue;

		vmp = init_proc(ip->proc_nr);

		exec_bootproc(vmp, ip);

		/* Free the file blob. */
		assert(!(ip->start_addr % VM_PAGE_SIZE));
		ip->len = roundup(ip->len, VM_PAGE_SIZE);
		free_mem(ABS2CLICK(ip->start_addr), ABS2CLICK(ip->len));
	}

	/* Set up table of calls. */
#define CALLMAP(code, func) { int i;			\
	i = CALLNUMBER(code);				\
	assert(i >= 0);					\
	assert(i < NR_VM_CALLS);			\
	vm_calls[i].vmc_func = (func);			\
	vm_calls[i].vmc_name = #code;			\
}
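
	/* For instance, CALLMAP(VM_MMAP, do_mmap) below stores do_mmap and the
	 * string "VM_MMAP" in the vm_calls[] slot selected by
	 * CALLNUMBER(VM_MMAP), which is what the dispatch in main() looks up.
	 */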

	/* Set call table to 0. This invalidates all calls (clear
	 * vmc_func).
	 */
	memset(vm_calls, 0, sizeof(vm_calls));

	/* Basic VM calls. */
	CALLMAP(VM_MMAP, do_mmap);
	CALLMAP(VM_MUNMAP, do_munmap);
	CALLMAP(VM_MUNMAP_TEXT, do_munmap);
	CALLMAP(VM_MAP_PHYS, do_map_phys);
	CALLMAP(VM_UNMAP_PHYS, do_unmap_phys);

	CALLMAP(VM_EXIT, do_exit);
	CALLMAP(VM_FORK, do_fork);
	CALLMAP(VM_BRK, do_brk);
	CALLMAP(VM_WILLEXIT, do_willexit);
	CALLMAP(VM_NOTIFY_SIG, do_notify_sig);

	CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv);
	CALLMAP(VM_RS_UPDATE, do_rs_update);
	CALLMAP(VM_RS_MEMCTL, do_rs_memctl);

	/* Calls from RS/VFS. */
	CALLMAP(VM_PROCCTL, do_procctl);

	CALLMAP(VM_REMAP, do_remap);
	CALLMAP(VM_REMAP_RO, do_remap);
	CALLMAP(VM_GETPHYS, do_get_phys);
	CALLMAP(VM_SHM_UNMAP, do_shared_unmap);
	CALLMAP(VM_GETREF, do_get_refcount);
	CALLMAP(VM_INFO, do_info);
	CALLMAP(VM_QUERY_EXIT, do_query_exit);
	CALLMAP(VM_WATCH_EXIT, do_watch_exit);
	CALLMAP(VM_FORGETBLOCKS, do_forgetblocks);
	CALLMAP(VM_FORGETBLOCK, do_forgetblock);
	CALLMAP(VM_YIELDBLOCKGETBLOCK, do_yieldblockgetblock);

	/* Initialize the structures for queryexit. */
	init_query_exit();
}

/*===========================================================================*
 *			     sef_cb_signal_handler			     *
 *===========================================================================*/
static void sef_cb_signal_handler(int signo)
{
	/* Check for known kernel signals, ignore anything else. */
	switch(signo) {
		/* There is a pending memory request from the kernel. */
		case SIGKMEM:
			do_memory();
			break;
	}

	/* It can happen that we get stuck receiving signals
	 * without sef_receive() returning. We could need more memory
	 * though.
	 */
	if(missing_spares > 0) {
		pt_cycle();	/* pagetable code wants to be called */
	}
}

/*===========================================================================*
 *				map_service				     *
 *===========================================================================*/
static int map_service(struct rprocpub *rpub)
{
/* Map a new service by initializing its call mask. */
	int r, proc_nr;

	if ((r = vm_isokendpt(rpub->endpoint, &proc_nr)) != OK) {
		return r;
	}

	/* Copy the call mask. */
	memcpy(&vmproc[proc_nr].vm_call_mask, &rpub->vm_call_mask,
		sizeof(vmproc[proc_nr].vm_call_mask));

	return(OK);
}
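
/* The call mask installed above is consulted on every request: vm_acl_ok()
 * below tests the bit for the zero-based call number before main() invokes
 * the handler from vm_calls[].
 */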

/*===========================================================================*
 *				vm_acl_ok				     *
 *===========================================================================*/
static int vm_acl_ok(endpoint_t caller, int call)
{
	int n, r;

	if ((r = vm_isokendpt(caller, &n)) != OK)
		panic("VM: from strange source: %d", caller);

	/* See if the call is allowed. */
	if (!GET_BIT(vmproc[n].vm_call_mask, call)) {