#include <minix/callnr.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/endpoint.h>
#include <minix/keymap.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>

#include <errno.h>
#include <string.h>
#include <stdio.h>
#include "glo.h"
#include "proto.h"
#include "util.h"
#include "vm.h"
#include "sanitycheck.h"

#include <archtypes.h>
#include "../../kernel/const.h"
#include "../../kernel/config.h"
#include "../../kernel/proc.h"
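/* Each VM call carries a bitmap of endpoints that are allowed to make it.
 * MAXMASK is the number of bits in such a mask, EPM(e) maps an endpoint to
 * its bit, and ANYEPM is a pseudo-endpoint whose bit means "callable by
 * anyone". EPMOK(mask, ep) checks whether endpoint ep is permitted by a
 * given caller mask.
 */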
#define MAXMASK (sizeof(mask_t)*8)
#define ANYEPM  (MINEPM+MAXMASK-1)
#define MAXEPM  (ANYEPM-1)
#define EPM(e)  ((1L) << ((e)-MINEPM))
#define EPMOK(mask, ep) (((mask) & EPM(ANYEPM)) || \
    ((ep) >= MINEPM && (ep) <= MAXEPM && (EPM(ep) & (mask))))
/* Table of calls and a macro to test for being in range. */
struct {
    mask_t vmc_callers;             /* bitmap of endpoint numbers */
    int (*vmc_func)(message *);     /* Call handles message. */
    char *vmc_name;                 /* Human-readable string. */
} vm_calls[VM_NCALLS];
/* Macro to verify call range and map the 'high' range to the 'base' range
 * (starting at 0) in one step. Evaluates to the zero-based call number if
 * the call number is valid, and to -1 otherwise.
 */
#define CALLNUMBER(c) (((c) >= VM_RQ_BASE && \
        (c) < VM_RQ_BASE + ELEMENTS(vm_calls)) ? \
        ((c) - VM_RQ_BASE) : -1)
FORWARD _PROTOTYPE(void vm_init, (void));

extern int kputc_use_private_grants;
/*===========================================================================*
 *                                  main                                     *
 *===========================================================================*/
PUBLIC int main(void)
{
    message msg;
    int result, who_e;

#if SANITYCHECKS
    memcpy(data1, CHECKADDR, sizeof(data1));
#endif

    env_parse("vm_paged", "d", 0, &vm_paged, 0, 1);
#if SANITYCHECKS
    env_parse("vm_sanitychecklevel", "d", 0, &vm_sanitychecklevel, 0, SCL_MAX);
#endif

    vm_init();
    /* This is VM's main loop. */
    while (TRUE) {
        int r, c;

        pt_cycle();     /* pagetable code wants to be called */

        SANITYCHECK(SCL_DETAIL);
        if ((r = receive(ANY, &msg)) != OK)
            vm_panic("receive() error", r);

        SANITYCHECK(SCL_DETAIL);
        if (msg.m_type & NOTIFY_MESSAGE) {
            switch (msg.m_source) {
            case SYSTEM:
                /* Kernel wants to have memory ranges
                 * verified, and/or page faults handled.
                 */
                do_memory();
                break;
            case PM_PROC_NR:
                /* PM sends a notify() on shutdown, which
                 * is OK and we ignore.
                 */
                break;
            case HARDWARE:
                /* This indicates a page fault has happened,
                 * which we have to handle.
                 */
                do_pagefaults();
                break;
            default:
                /* No-one else should send us notifies. */
                printf("VM: ignoring notify() from %d\n",
                    msg.m_source);
                break;
            }
            continue;
        }
        who_e = msg.m_source;
        c = msg.m_type - VM_RQ_BASE;
        result = ENOSYS; /* Out of range or restricted calls return this. */
        if ((c = CALLNUMBER(msg.m_type)) < 0 || !vm_calls[c].vmc_func) {
            printf("VM: out of range or missing callnr %d from %d\n",
                msg.m_type, msg.m_source);
        } else if (!EPMOK(vm_calls[c].vmc_callers, msg.m_source)) {
            printf("VM: restricted call %s from %d instead of 0x%lx\n",
                vm_calls[c].vmc_name, msg.m_source,
                vm_calls[c].vmc_callers);
        } else {
            SANITYCHECK(SCL_FUNCTIONS);
            result = vm_calls[c].vmc_func(&msg);
            SANITYCHECK(SCL_FUNCTIONS);
        }
        /* Send reply message, unless the return code is SUSPEND,
         * which is a pseudo-result suppressing the reply message.
         */
        if (result != SUSPEND) {
            SANITYCHECK(SCL_DETAIL);
            msg.m_type = result;
            if ((r = send(who_e, &msg)) != OK) {
                printf("VM: couldn't send %d to %d (err %d)\n",
                    msg.m_type, who_e, r);
                vm_panic("send() error", NO_NUM);
            }
            SANITYCHECK(SCL_DETAIL);
        }
        SANITYCHECK(SCL_DETAIL);
    }

    return(OK);
}
/*===========================================================================*
 *                                 vm_init                                   *
 *===========================================================================*/
PRIVATE void vm_init(void)
{
    int s;
    struct vmproc *vmp;
    struct memory mem_chunks[NR_MEMS];
    struct boot_image image[NR_BOOT_PROCS];
    struct boot_image *ip;
    /* Get chunks of available memory. */
    get_mem_chunks(mem_chunks);
    /* Initialize VM's process table. Request a copy of the system
     * image table that is defined at the kernel level to see which
     * slots to fill in.
     */
    if (OK != (s = sys_getimage(image)))
        vm_panic("couldn't get image table: %d\n", s);
    /* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
    memset(vmproc, 0, sizeof(vmproc));
    /* Walk through boot-time system processes that are alive
     * now and make valid slot entries for them.
     */
    for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
        if (ip->proc_nr >= _NR_PROCS) { vm_panic("proc", ip->proc_nr); }
        if (ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;
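        /* GETVMP(v, nr) picks the vmproc slot for a boot-image process:
         * a regular slot for nr >= 0, the dedicated VMP_SYSTEM slot for
         * the kernel (SYSTEM), and a panic for anything else.
         */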
#define GETVMP(v, nr)                                   \
        if ((nr) >= 0) {                                \
            (v) = &vmproc[(nr)];                        \
        } else if ((nr) == SYSTEM) {                    \
            (v) = &vmproc[VMP_SYSTEM];                  \
        } else {                                        \
            vm_panic("init: crazy proc_nr", (nr));      \
        }
        /* Initialize normal process table slot or special SYSTEM
         * table slot. Kernel memory is already reserved.
         */
        GETVMP(vmp, ip->proc_nr);
        /* reset fields as if exited */

        /* Get memory map for this process from the kernel. */
        if ((s = get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != OK)
            vm_panic("couldn't get process mem_map", s);
        /* Remove this memory from the free list. */
        reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);
        vmp->vm_flags = VMF_INUSE;
        vmp->vm_endpoint = ip->endpoint;
        vmp->vm_stacktop =
            CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                vmp->vm_arch.vm_seg[S].mem_len);
        if (vmp->vm_arch.vm_seg[T].mem_len != 0)
            vmp->vm_flags |= VMF_SEPARATE;
    }
    /* Let architecture-dependent VM initialization use some memory. */
    arch_init_vm(mem_chunks);
    /* Architecture-dependent initialization. */
    pt_init();
    /* Initialize tables to all physical memory. */
    mem_init(mem_chunks);
    /* Bits of code need to know where a process can
     * start in a pagetable.
     */
    kernel_top_bytes = find_kernel_top();
    /* Can first kernel pages of code and data be (left) mapped out?
     * If so, change the SYSTEM process' memory map to reflect this
     * (future mappings of SYSTEM into other processes will not include
     * first pages), and free the first pages.
     */
    if (vm_paged && sys_vmctl(SELF, VMCTL_NOPAGEZERO, 0) == OK) {
        vmp = &vmproc[VMP_SYSTEM];
        if (vmp->vm_arch.vm_seg[T].mem_len > 0) {
#define DIFF CLICKSPERPAGE
            vmp->vm_arch.vm_seg[T].mem_phys += DIFF;
            vmp->vm_arch.vm_seg[T].mem_len -= DIFF;
        }
        vmp->vm_arch.vm_seg[D].mem_phys += DIFF;
        vmp->vm_arch.vm_seg[D].mem_len -= DIFF;
    }
    /* Give these processes their own page table. */
    for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
        vir_bytes old_stacktop, old_stack;
        if (ip->proc_nr < 0) continue;

        GETVMP(vmp, ip->proc_nr);
        old_stack =
            vmp->vm_arch.vm_seg[S].mem_vir +
            vmp->vm_arch.vm_seg[S].mem_len -
            vmp->vm_arch.vm_seg[D].mem_len;
        if (!(ip->flags & PROC_FULLVM))
            continue;
        if (pt_new(&vmp->vm_pt) != OK)
            vm_panic("vm_init: no new pagetable", NO_NUM);
#define BASICSTACK VM_PAGE_SIZE
        old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
            vmp->vm_arch.vm_seg[S].mem_len);
        if (sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
            VM_STACKTOP - old_stacktop) != OK) {
            vm_panic("VM: vmctl for new stack failed", NO_NUM);
        }
        FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
            vmp->vm_arch.vm_seg[D].mem_len,
            old_stack);
        if (proc_new(vmp,
            VM_PROCSTART,
            CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
            CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
            CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
            BASICSTACK,
            CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                vmp->vm_arch.vm_seg[S].mem_len -
                vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
            CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
            CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
            VM_STACKTOP) != OK) {
            vm_panic("vm_init: proc_new failed", NO_NUM);
        }
    }
    /* Set up table of calls. */
#define CALLMAP(code, func, thecaller) { int i;                             \
    if ((i = CALLNUMBER(code)) < 0) { vm_panic(#code " invalid", (code)); } \
    if (i >= VM_NCALLS) { vm_panic(#code " invalid", (code)); }             \
    vm_calls[i].vmc_func = (func);                                          \
    vm_calls[i].vmc_name = #code;                                           \
    if (((thecaller) < MINEPM || (thecaller) > MAXEPM)                      \
        && (thecaller) != ANYEPM) {                                         \
        vm_panic(#thecaller " invalid", (code));                            \
    }                                                                       \
    vm_calls[i].vmc_callers |= EPM(thecaller);                              \
}
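/* A call may be CALLMAP'ed more than once; each mapping ORs another
 * caller's bit into vmc_callers, so e.g. VM_MAP_PHYS below ends up
 * callable by both the tty and memory drivers.
 */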
    /* Set call table to 0. This invalidates all calls (clear
     * vmc_func).
     */
    memset(vm_calls, 0, sizeof(vm_calls));
    /* Requests from PM (restricted to be from PM only). */
    CALLMAP(VM_EXIT, do_exit, PM_PROC_NR);
    CALLMAP(VM_FORK, do_fork, PM_PROC_NR);
    CALLMAP(VM_BRK, do_brk, PM_PROC_NR);
    CALLMAP(VM_EXEC_NEWMEM, do_exec_newmem, PM_PROC_NR);
    CALLMAP(VM_PUSH_SIG, do_push_sig, PM_PROC_NR);
    CALLMAP(VM_WILLEXIT, do_willexit, PM_PROC_NR);
    CALLMAP(VM_ADDDMA, do_adddma, PM_PROC_NR);
    CALLMAP(VM_DELDMA, do_deldma, PM_PROC_NR);
    CALLMAP(VM_GETDMA, do_getdma, PM_PROC_NR);
    CALLMAP(VM_ALLOCMEM, do_allocmem, PM_PROC_NR);
    /* Physical mapping requests.
     * tty (for /dev/video) does this.
     * memory (for /dev/mem) does this.
     */
    CALLMAP(VM_MAP_PHYS, do_map_phys, TTY_PROC_NR);
    CALLMAP(VM_UNMAP_PHYS, do_unmap_phys, TTY_PROC_NR);
    CALLMAP(VM_MAP_PHYS, do_map_phys, MEM_PROC_NR);
    CALLMAP(VM_UNMAP_PHYS, do_unmap_phys, MEM_PROC_NR);
    /* Requests from userland (source unrestricted). */
    CALLMAP(VM_MMAP, do_mmap, ANYEPM);
    /* Requests (actually replies) from VFS (restricted to VFS only). */
    CALLMAP(VM_VFS_REPLY_OPEN, do_vfs_reply, VFS_PROC_NR);
    CALLMAP(VM_VFS_REPLY_MMAP, do_vfs_reply, VFS_PROC_NR);
    CALLMAP(VM_VFS_REPLY_CLOSE, do_vfs_reply, VFS_PROC_NR);