/* servers/vm/exec.c - exec() memory handling for the VM server */
#define _SYSTEM 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/keymap.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/bitmap.h>

#include <errno.h>
#include <assert.h>
#include <env.h>
#include <pagetable.h>

#include "glo.h"
#include "proto.h"
#include "util.h"
#include "vm.h"
#include "region.h"
#include "sanitycheck.h"

#include "memory.h"
FORWARD _PROTOTYPE( int new_mem, (struct vmproc *vmp, struct vmproc *sh_vmp,
	vir_bytes text_bytes, vir_bytes data_bytes, vir_bytes bss_bytes,
	vir_bytes stk_bytes, phys_bytes tot_bytes, vir_bytes *stack_top));

static int failcount;
/*===========================================================================*
 *				find_share				     *
 *===========================================================================*/
PUBLIC struct vmproc *find_share(
	struct vmproc *vmp_ign,		/* process that should not be looked at */
	ino_t ino,			/* parameters that uniquely identify a file */
	dev_t dev,
	time_t ctime
)
{
/* Look for a process that is executing the file <ino, dev, ctime>.  Don't
 * accidentally "find" vmp_ign, because it is the process on whose behalf this
 * call is made.
 */
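/* Note: ctime is part of the identification, presumably so that an inode that
 * has been re-used, or a binary that has been modified since it was exec()ed,
 * is not mistaken for the same text image.
 */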
	struct vmproc *vmp;
	for (vmp = &vmproc[0]; vmp < &vmproc[NR_PROCS]; vmp++) {
		if (!(vmp->vm_flags & VMF_INUSE)) continue;
		if (!(vmp->vm_flags & VMF_SEPARATE)) continue;
		if (vmp->vm_flags & VMF_HASPT) continue;
		if (vmp == vmp_ign) continue;
		if (vmp->vm_ino != ino) continue;
		if (vmp->vm_dev != dev) continue;
		if (vmp->vm_ctime != ctime) continue;
		return vmp;
	}
	return(NULL);
}
/*===========================================================================*
 *				exec_newmem				     *
 *===========================================================================*/
PUBLIC int do_exec_newmem(message *msg)
{
	int r, proc_e, proc_n;
	vir_bytes stack_top;
	vir_clicks tc, dc, sc, totc, dvir, s_vir;
	struct vmproc *vmp, *sh_mp;
	char *ptr;
	struct exec_newmem args;

	SANITYCHECK(SCL_FUNCTIONS);

	proc_e= msg->VMEN_ENDPOINT;
	if (vm_isokendpt(proc_e, &proc_n) != OK)
	{
		printf("VM: exec_newmem: bad endpoint %d from %d\n",
			proc_e, msg->m_source);
		return ESRCH;
	}
	vmp= &vmproc[proc_n];
	ptr= msg->VMEN_ARGSPTR;

	if(msg->VMEN_ARGSSIZE != sizeof(args)) {
		printf("VM: exec_newmem: args size %d != %ld\n",
			msg->VMEN_ARGSSIZE, sizeof(args));
		return EINVAL;
	}
	SANITYCHECK(SCL_DETAIL);

	r= sys_datacopy(msg->m_source, (vir_bytes)ptr,
		SELF, (vir_bytes)&args, sizeof(args));
	if (r != OK)
		panic("exec_newmem: sys_datacopy failed: %d", r);
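	/* 'args' now holds the exec parameters supplied by the caller: the
	 * segment sizes (text, data, bss, args and total) plus the separate
	 * I&D flag and the file identity (st_ino, st_dev, st_ctime) of the
	 * executable, as consumed below.
	 */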
	/* Minimum stack region (not preallocated)
	 * Stopgap for better rlimit-based stack size system
	 */
	if(args.tot_bytes < MINSTACKREGION) {
		args.tot_bytes = MINSTACKREGION;
	}

	/* Check to see if segment sizes are feasible. */
	tc = (vir_clicks) (CLICK_CEIL(args.text_bytes) >> CLICK_SHIFT);
	dc = (vir_clicks) (CLICK_CEIL(args.data_bytes+args.bss_bytes) >> CLICK_SHIFT);
	totc = (vir_clicks) (CLICK_CEIL(args.tot_bytes) >> CLICK_SHIFT);
	sc = (vir_clicks) (CLICK_CEIL(args.args_bytes) >> CLICK_SHIFT);
	if (dc >= totc) {
		printf("VM: newmem: no stack?\n");
		return(ENOEXEC);	/* stack must be at least 1 click */
	}

	dvir = (args.sep_id ? 0 : tc);
	s_vir = dvir + (totc - sc);
	r = (dvir + dc > s_vir) ? ENOMEM : OK;
	if (r != OK) {
		printf("VM: newmem: no virtual space?\n");
		return r;
	}
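	/* Illustrative sizing (assuming the usual 4 KB click): with sep_id
	 * set, data starts at virtual click 0 (dvir = 0); otherwise it
	 * follows the text (dvir = tc).  s_vir is the click where the initial
	 * stack/arguments land, at the top of the tot_bytes space.  E.g.
	 * tc = 8, dc = 16, sc = 4, totc = 256 gives s_vir = dvir + 252, so
	 * the ENOMEM case above only triggers when data, gap and stack
	 * cannot all fit within totc clicks.
	 */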
	/* Can the process' text be shared with that of one already running? */
	if(!vm_paged) {
		sh_mp = find_share(vmp, args.st_ino, args.st_dev, args.st_ctime);
	} else {
		sh_mp = NULL;
	}

	/* Allocate new memory and release old memory.  Fix map and tell
	 * kernel.
	 */
	r = new_mem(vmp, sh_mp, args.text_bytes, args.data_bytes,
		args.bss_bytes, args.args_bytes, args.tot_bytes, &stack_top);
	if (r != OK) {
		printf("VM: newmem: new_mem failed\n");
		return(r);
	}

	/* Save file identification to allow it to be shared. */
	vmp->vm_ino = args.st_ino;
	vmp->vm_dev = args.st_dev;
	vmp->vm_ctime = args.st_ctime;

	/* set/clear separate I&D flag */
	if (args.sep_id)
		vmp->vm_flags |= VMF_SEPARATE;
	else
		vmp->vm_flags &= ~VMF_SEPARATE;

	msg->VMEN_STACK_TOP = (void *) stack_top;
	msg->VMEN_FLAGS = 0;
	if (!sh_mp)			/* Load text if sh_mp = NULL */
		msg->VMEN_FLAGS |= EXC_NM_RF_LOAD_TEXT;

	return OK;
}
/*===========================================================================*
 *				new_mem					     *
 *===========================================================================*/
PRIVATE int new_mem(rmp, sh_mp, text_bytes, data_bytes,
	bss_bytes, stk_bytes, tot_bytes, stack_top)
struct vmproc *rmp;		/* process to get a new memory map */
struct vmproc *sh_mp;		/* text can be shared with this process */
vir_bytes text_bytes;		/* text segment size in bytes */
vir_bytes data_bytes;		/* size of initialized data in bytes */
vir_bytes bss_bytes;		/* size of bss in bytes */
vir_bytes stk_bytes;		/* size of initial stack segment in bytes */
phys_bytes tot_bytes;		/* total memory to allocate, including gap */
vir_bytes *stack_top;		/* top of process stack */
{
/* Allocate new memory and release the old memory.  Change the map and report
 * the new map to the kernel.  Zero the new core image's bss, gap and stack.
 */
	vir_clicks text_clicks, data_clicks, gap_clicks, stack_clicks, tot_clicks;
	phys_bytes bytes, base, bss_offset;
	int s, r2, r, hadpt = 0;
	struct vmproc *vmpold = &vmproc[VMP_EXECTMP];

	SANITYCHECK(SCL_FUNCTIONS);

	if(rmp->vm_flags & VMF_HASPT) {
		hadpt = 1;
	}

	/* No need to allocate text if it can be shared. */
	if (sh_mp != NULL) {
		text_bytes = 0;
		assert(!vm_paged);
	}

	/* Acquire the new memory.  Each of the 4 parts: text, (data+bss), gap,
	 * and stack occupies an integral number of clicks, starting at click
	 * boundary.  The data and bss parts are run together with no space.
	 */
	text_clicks = (vir_clicks) (CLICK_CEIL(text_bytes) >> CLICK_SHIFT);
	data_clicks = (vir_clicks) (CLICK_CEIL(data_bytes + bss_bytes) >> CLICK_SHIFT);
	stack_clicks = (vir_clicks) (CLICK_CEIL(stk_bytes) >> CLICK_SHIFT);
	tot_clicks = (vir_clicks) (CLICK_CEIL(tot_bytes) >> CLICK_SHIFT);
	gap_clicks = tot_clicks - data_clicks - stack_clicks;
	if ( (int) gap_clicks < 0) {
		printf("VM: new_mem: no gap?\n");
		return(ENOMEM);
	}
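	/* Quick sanity picture of the click arithmetic above (illustrative,
	 * assuming 4 KB clicks): tot_bytes covers data+bss, gap and stack but
	 * not the text, so tot_clicks = data_clicks + gap_clicks + stack_clicks.
	 * E.g. data_bytes+bss_bytes = 40 KB (10 clicks), stk_bytes = 16 KB
	 * (4 clicks) and tot_bytes = 1 MB (256 clicks) leave gap_clicks = 242.
	 */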
	/* Keep previous process state for recovery; the sanity check functions
	 * know about the 'vmpold' slot, so the memory that the exec()ing
	 * process is still holding is referenced there.
	 *
	 * Throw away the old page table to avoid having two process slots
	 * using the same vm_pt.
	 * Just recreate it in the case that we have to revert.
	 */
	SANITYCHECK(SCL_DETAIL);
	if(hadpt) {
		rmp->vm_flags &= ~VMF_HASPT;
		pt_free(&rmp->vm_pt);
	}
	assert(!(vmpold->vm_flags & VMF_INUSE));
	*vmpold = *rmp;		/* copy current state. */
	rmp->vm_regions = NULL;	/* exec()ing process regions thrown out. */
	SANITYCHECK(SCL_DETAIL);

	if(!hadpt) {
		if (find_share(rmp, rmp->vm_ino, rmp->vm_dev, rmp->vm_ctime) == NULL) {
			/* No other process shares the text segment, so free it. */
			free_mem(rmp->vm_arch.vm_seg[T].mem_phys, rmp->vm_arch.vm_seg[T].mem_len);
		}

		/* Free the data and stack segments. */
		free_mem(rmp->vm_arch.vm_seg[D].mem_phys,
			rmp->vm_arch.vm_seg[S].mem_vir
			+ rmp->vm_arch.vm_seg[S].mem_len
			- rmp->vm_arch.vm_seg[D].mem_vir);
	}

	/* Build the new process in the current slot, without freeing the old
	 * one.  If it fails, revert.
	 */

	if(vm_paged) {
		int ptok = 1;
		SANITYCHECK(SCL_DETAIL);
		if((r=pt_new(&rmp->vm_pt)) != OK) {
			ptok = 0;
			printf("exec_newmem: no new pagetable\n");
		}

		SANITYCHECK(SCL_DETAIL);
		if(r != OK || (r=proc_new(rmp,
		  VM_PROCSTART,		/* where to start the process in the page table */
		  CLICK2ABS(text_clicks),  /* how big is the text in bytes, page-aligned */
		  CLICK2ABS(data_clicks),  /* how big is data+bss, page-aligned */
		  CLICK2ABS(stack_clicks), /* how big is stack, page-aligned */
		  CLICK2ABS(gap_clicks),   /* how big is gap, page-aligned */
		  0, 0,			/* not preallocated */
		  VM_STACKTOP,		/* regular stack top */
		  0)) != OK) {
			SANITYCHECK(SCL_DETAIL);
			printf("VM: new_mem: failed\n");
			if(ptok) {
				rmp->vm_flags &= ~VMF_HASPT;
				pt_free(&rmp->vm_pt);
			}
			*rmp = *vmpold;		/* undo. */
			clear_proc(vmpold);	/* disappear. */
			SANITYCHECK(SCL_DETAIL);
			if(hadpt) {
				if(pt_new(&rmp->vm_pt) != OK) {
					/* We secretly know that creating a new page table
					 * in a slot that held one before will never fail.
					 */
					panic("new_mem: pt_new failed: %d", ENOMEM);
				}
				rmp->vm_flags |= VMF_HASPT;
				SANITYCHECK(SCL_DETAIL);
				if(map_writept(rmp) != OK) {
					printf("VM: warning: exec undo failed\n");
				}
				SANITYCHECK(SCL_DETAIL);
			}
			return r;
		}
		SANITYCHECK(SCL_DETAIL);

		/* The new process is made; free and unreference the page
		 * table and the memory still held by the exec()ing process.
		 */
		SANITYCHECK(SCL_DETAIL);
		free_proc(vmpold);
		clear_proc(vmpold);	/* disappear. */
		SANITYCHECK(SCL_DETAIL);
		*stack_top = VM_STACKTOP;
	} else {
		phys_clicks new_base;

		new_base = alloc_mem(text_clicks + tot_clicks, 0);
		if (new_base == NO_MEM) {
			printf("VM: new_mem: alloc_mem failed\n");
			return(ENOMEM);
		}
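		/* Sketch of the contiguous image being built here (non-paged
		 * case), starting at new_base and measured in clicks:
		 *
		 *   new_base                : text        (text_clicks)
		 *   new_base + text_clicks  : data + bss  (data_clicks)
		 *   ... then                : gap         (gap_clicks)
		 *   ... then                : stack       (stack_clicks)
		 *
		 * The T, D and S segment descriptors filled in below describe
		 * exactly this layout to the kernel.
		 */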
		if (sh_mp != NULL) {
			/* Share the text segment. */
			rmp->vm_arch.vm_seg[T] = sh_mp->vm_arch.vm_seg[T];
		} else {
			rmp->vm_arch.vm_seg[T].mem_phys = new_base;
			rmp->vm_arch.vm_seg[T].mem_vir = 0;
			rmp->vm_arch.vm_seg[T].mem_len = text_clicks;

			if (text_clicks > 0)
			{
				/* Zero the last click of the text segment.  Otherwise
				 * the unused part of that click may retain old data.
				 */
				base = (phys_bytes)(new_base+text_clicks-1) << CLICK_SHIFT;
				if ((s= sys_memset(0, base, CLICK_SIZE)) != OK)
					panic("new_mem: sys_memset failed: %d", s);
			}
		}

		/* No paging stuff. */
		rmp->vm_flags &= ~VMF_HASPT;
		rmp->vm_regions = NULL;

		rmp->vm_arch.vm_seg[D].mem_phys = new_base + text_clicks;
		rmp->vm_arch.vm_seg[D].mem_vir = 0;
		rmp->vm_arch.vm_seg[D].mem_len = data_clicks;
		rmp->vm_arch.vm_seg[S].mem_phys = rmp->vm_arch.vm_seg[D].mem_phys +
			data_clicks + gap_clicks;
		rmp->vm_arch.vm_seg[S].mem_vir = rmp->vm_arch.vm_seg[D].mem_vir +
			data_clicks + gap_clicks;
		rmp->vm_arch.vm_seg[S].mem_len = stack_clicks;
		rmp->vm_stacktop =
			CLICK2ABS(rmp->vm_arch.vm_seg[S].mem_vir +
				rmp->vm_arch.vm_seg[S].mem_len);

		rmp->vm_arch.vm_data_top =
			(rmp->vm_arch.vm_seg[S].mem_vir +
			rmp->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;

		if((r2=sys_newmap(rmp->vm_endpoint, rmp->vm_arch.vm_seg)) != OK) {
			/* report new map to the kernel */
			panic("sys_newmap failed: %d", r2);
		}

		/* Zero the bss, gap, and stack segment. */
		bytes = (phys_bytes)(data_clicks + gap_clicks + stack_clicks) << CLICK_SHIFT;
		base = (phys_bytes) rmp->vm_arch.vm_seg[D].mem_phys << CLICK_SHIFT;
		bss_offset = (data_bytes >> CLICK_SHIFT) << CLICK_SHIFT;
		base += bss_offset;
		bytes -= bss_offset;
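		/* Worked example of bss_offset (illustrative, 4 KB clicks
		 * assumed): with data_bytes = 0x2345, bss_offset = 0x2000,
		 * so zeroing starts in the click that contains the end of the
		 * initialized data.  The whole clicks below that boundary are
		 * skipped: they hold only initialized data, which the exec()
		 * caller presumably loads afterwards anyway.
		 */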
		if ((s=sys_memset(0, base, bytes)) != OK) {
			panic("new_mem can't zero: %d", s);
		}

		/* Tell the kernel this process has no page table. */
		if((s=pt_bind(NULL, rmp)) != OK)
			panic("exec_newmem: pt_bind failed: %d", s);
		*stack_top= ((vir_bytes)rmp->vm_arch.vm_seg[S].mem_vir << CLICK_SHIFT) +
			((vir_bytes)rmp->vm_arch.vm_seg[S].mem_len << CLICK_SHIFT);
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return(OK);
}
/*===========================================================================*
 *				find_kernel_top				     *
 *===========================================================================*/
PUBLIC phys_bytes find_kernel_top(void)
{
/* Find out where the kernel is, so we know where to start mapping
 * user processes.
 */
	u32_t kernel_top = 0;
#define MEMTOP(v, i) \
  (vmproc[v].vm_arch.vm_seg[i].mem_phys + vmproc[v].vm_arch.vm_seg[i].mem_len)
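	/* MEMTOP(v, i) is the first click past segment i of process slot v;
	 * taking the maximum over the kernel's T, D and S segments below
	 * therefore yields the highest click the kernel image occupies.
	 */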
	assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
	kernel_top = MEMTOP(VMP_SYSTEM, T);
	kernel_top = MAX(kernel_top, MEMTOP(VMP_SYSTEM, D));
	kernel_top = MAX(kernel_top, MEMTOP(VMP_SYSTEM, S));
	assert(kernel_top);

	return CLICK2ABS(kernel_top);
}
/*===========================================================================*
 *				proc_new				     *
 *===========================================================================*/
PUBLIC int proc_new(struct vmproc *vmp,
	phys_bytes vstart,	  /* where to start the process in page table */
	phys_bytes text_bytes,	  /* how much code, in bytes but page aligned */
	phys_bytes data_bytes,	  /* how much data + bss, in bytes but page aligned */
	phys_bytes stack_bytes,	  /* stack space to reserve, in bytes, page aligned */
	phys_bytes gap_bytes,	  /* gap bytes, page aligned */
	phys_bytes text_start,	  /* text starts here, if preallocated, otherwise 0 */
	phys_bytes data_start,	  /* data starts here, if preallocated, otherwise 0 */
	phys_bytes stacktop,
	int prealloc_stack
)
{
	int s;
	vir_bytes hole_bytes;
	int prealloc;
	struct vir_region *reg;

	assert(!(vstart % VM_PAGE_SIZE));
	assert(!(text_bytes % VM_PAGE_SIZE));
	assert(!(data_bytes % VM_PAGE_SIZE));
	assert(!(stack_bytes % VM_PAGE_SIZE));
	assert(!(gap_bytes % VM_PAGE_SIZE));
	assert(!(text_start % VM_PAGE_SIZE));
	assert(!(data_start % VM_PAGE_SIZE));
	assert((!text_start && !data_start) || (text_start && data_start));

	/* Place text at start of process. */
	vmp->vm_arch.vm_seg[T].mem_phys = ABS2CLICK(vstart);
	vmp->vm_arch.vm_seg[T].mem_vir = 0;
	vmp->vm_arch.vm_seg[T].mem_len = ABS2CLICK(text_bytes);

	vmp->vm_offset = vstart;

	/* page mapping flags for code */
#define TEXTFLAGS (PTF_PRESENT | PTF_USER)
	SANITYCHECK(SCL_DETAIL);
	if(text_bytes > 0) {
		if(!(reg=map_page_region(vmp, vstart, 0, text_bytes,
			text_start ? text_start : MAP_NONE,
			VR_ANON | VR_WRITABLE, text_start ? 0 : MF_PREALLOC))) {
			SANITYCHECK(SCL_DETAIL);
			printf("VM: proc_new: map_page_region failed (text)\n");
			map_free_proc(vmp);
			SANITYCHECK(SCL_DETAIL);
			return(ENOMEM);
		}
		map_region_set_tag(reg, VRT_TEXT);
		SANITYCHECK(SCL_DETAIL);
	}
	SANITYCHECK(SCL_DETAIL);

	/* Allocate memory for data (including bss, but not including gap
	 * or stack), make sure it's cleared, and map it in after text
	 * (if any).
	 */
	if(!(vmp->vm_heap = map_page_region(vmp, vstart + text_bytes, 0,
		data_bytes, data_start ? data_start : MAP_NONE, VR_ANON | VR_WRITABLE,
		data_start ? 0 : MF_PREALLOC))) {
		printf("VM: exec: map_page_region for data failed\n");
		map_free_proc(vmp);
		SANITYCHECK(SCL_DETAIL);
		return ENOMEM;
	}

	/* Tag the heap so the brk() call knows which region to extend. */
	map_region_set_tag(vmp->vm_heap, VRT_HEAP);

	/* How many address space bytes lie between the end of data and the
	 * start of the stack region?  stacktop is the first address after
	 * the stack, as addressed from within the user process.
	 */
	hole_bytes = stacktop - data_bytes - stack_bytes - gap_bytes;
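	/* In user (data-segment) addresses the layout built here is,
	 * illustratively:
	 *
	 *   [0, data_bytes)                                    heap (data+bss)
	 *   [data_bytes, stacktop - gap_bytes - stack_bytes)   unmapped hole
	 *   [stacktop - gap_bytes - stack_bytes, stacktop)     gap + stack
	 *
	 * hole_bytes is the size of that unmapped middle part, so the region
	 * mapped below ends exactly at the user address 'stacktop'.
	 */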
	if(!(reg=map_page_region(vmp,
		vstart + text_bytes + data_bytes + hole_bytes,
		0, stack_bytes + gap_bytes, MAP_NONE,
		VR_ANON | VR_WRITABLE, prealloc_stack ? MF_PREALLOC : 0))) {
		panic("map_page_region failed for stack");
	}

	map_region_set_tag(reg, VRT_STACK);

	vmp->vm_arch.vm_seg[D].mem_phys = ABS2CLICK(vstart + text_bytes);
	vmp->vm_arch.vm_seg[D].mem_vir = 0;
	vmp->vm_arch.vm_seg[D].mem_len = ABS2CLICK(data_bytes);

	vmp->vm_arch.vm_seg[S].mem_phys = ABS2CLICK(vstart +
		text_bytes + data_bytes + gap_bytes + hole_bytes);
	vmp->vm_arch.vm_seg[S].mem_vir = ABS2CLICK(data_bytes + gap_bytes + hole_bytes);

	/* Where are we allowed to start using the rest of the virtual
	 * address space?
	 */
	vmp->vm_stacktop = stacktop;

	vmp->vm_flags |= VMF_HASPT;

	if(vmp->vm_endpoint != NONE) {

		/* Pretend the stack is the full size of the data segment, so
		 * we get a full-sized data segment, up to VM_DATATOP.
		 * After sys_newmap(), change the stack to what we know the
		 * stack to be (up to stacktop).
		 */
		vmp->vm_arch.vm_seg[S].mem_len = (VM_DATATOP >> CLICK_SHIFT) -
			vmp->vm_arch.vm_seg[S].mem_vir - ABS2CLICK(vstart) - ABS2CLICK(text_bytes);
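		/* Presumably the reason for the oversized S segment: segment-
		 * based code (such as brk() limit checks) derives the usable
		 * data space from the map reported via sys_newmap(), so
		 * advertising a stack segment that reaches up to VM_DATATOP
		 * gives the process a full-sized data segment to grow into.
		 */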
		/* What is the final size of the data segment in bytes? */
		vmp->vm_arch.vm_data_top =
			(vmp->vm_arch.vm_seg[S].mem_vir +
			vmp->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;

		if((s=sys_newmap(vmp->vm_endpoint, vmp->vm_arch.vm_seg)) != OK)
			panic("sys_newmap (vm) failed: %d", s);
		if((s=pt_bind(&vmp->vm_pt, vmp)) != OK)
			panic("exec_newmem: pt_bind failed: %d", s);
	}

	return OK;
}