/* minix/servers/vm/pagefaults.c */

#define _SYSTEM 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/safecopies.h>
#include <minix/bitmap.h>
#include <minix/vfsif.h>

#include <machine/vmparam.h>

#include <errno.h>
#include <string.h>
#include <env.h>
#include <stdio.h>
#include <fcntl.h>
#include <signal.h>
#include <assert.h>

#include "glo.h"
#include "proto.h"
#include "util.h"
#include "region.h"
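
/* Saved context of a page fault that could not be resolved immediately
 * (e.g. because the backing page must first be fetched through VFS);
 * pf_cont() uses it to retry the fault once the data is available.
 */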
struct pf_state {
	endpoint_t ep;
	vir_bytes vaddr;
	u32_t err;
};
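
/* State of a handle_memory request covering a memory range. It is passed to
 * map_handle_memory() as callback state, so a request that has to wait for
 * VFS can be resumed later by handle_memory_continue().
 */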
struct hm_state {
	endpoint_t caller;	/* KERNEL or process? if NONE, no callback */
	endpoint_t requestor;	/* on behalf of whom? */
	int transid;		/* VFS transaction id if valid */
	struct vmproc *vmp;	/* target address space */
	vir_bytes mem, len;	/* memory range */
	int wrflag;		/* must it be writable or not */
	int valid;		/* sanity check */
	int vfs_avail;		/* may vfs be called to satisfy this range? */
#define VALID	0xc0ff1
};

static void handle_memory_continue(struct vmproc *vmp, message *m,
	void *arg, void *statearg);
static int handle_memory_step(struct hm_state *hmstate, int retry);
static void handle_memory_final(struct hm_state *state, int result);

/*===========================================================================*
 *				pf_errstr				     *
 *===========================================================================*/
char *pf_errstr(u32_t err)
{
	static char buf[100];

	snprintf(buf, sizeof(buf), "err 0x%lx ", (long)err);
	if(PFERR_NOPAGE(err)) strcat(buf, "nopage ");
	if(PFERR_PROT(err)) strcat(buf, "protection ");
	if(PFERR_WRITE(err)) strcat(buf, "write");
	if(PFERR_READ(err)) strcat(buf, "read");

	return buf;
}

static void pf_cont(struct vmproc *vmp, message *m, void *arg, void *statearg);

static void handle_memory_continue(struct vmproc *vmp, message *m, void *arg, void *statearg);
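
/* Handle a page fault of process 'ep' at address 'addr'. Invalid accesses
 * (no region mapped there, or a write to a read-only region) are punished
 * with SIGSEGV; valid ones are resolved through map_pf(), which may suspend
 * the request and retry it later via pf_cont() (retry != 0 on that pass).
 */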
static void handle_pagefault(endpoint_t ep, vir_bytes addr, u32_t err, int retry)
{
	struct vmproc *vmp;
	int s, result;
	struct vir_region *region;
	vir_bytes offset;
	int p, wr = PFERR_WRITE(err);
	int io = 0;

	if(vm_isokendpt(ep, &p) != OK)
		panic("handle_pagefault: endpoint wrong: %d", ep);

	vmp = &vmproc[p];
	assert(vmp->vm_flags & VMF_INUSE);

	/* See if address is valid at all. */
	if(!(region = map_lookup(vmp, addr, NULL))) {
		if(PFERR_PROT(err)) {
			printf("VM: pagefault: SIGSEGV %d protected addr 0x%lx; %s\n",
				ep, addr, pf_errstr(err));
		} else {
			assert(PFERR_NOPAGE(err));
			printf("VM: pagefault: SIGSEGV %d bad addr 0x%lx; %s\n",
				ep, addr, pf_errstr(err));
			sys_diagctl_stacktrace(ep);
		}
		if((s=sys_kill(vmp->vm_endpoint, SIGSEGV)) != OK)
			panic("sys_kill failed: %d", s);
		if((s=sys_vmctl(ep, VMCTL_CLEAR_PAGEFAULT, 0 /*unused*/)) != OK)
			panic("do_pagefaults: sys_vmctl failed: %d", ep);
		return;
	}

	/* If process was writing, see if it's writable. */
	if(!(region->flags & VR_WRITABLE) && wr) {
		printf("VM: pagefault: SIGSEGV %d ro map 0x%lx %s\n",
			ep, addr, pf_errstr(err));
		if((s=sys_kill(vmp->vm_endpoint, SIGSEGV)) != OK)
			panic("sys_kill failed: %d", s);
		if((s=sys_vmctl(ep, VMCTL_CLEAR_PAGEFAULT, 0 /*unused*/)) != OK)
			panic("do_pagefaults: sys_vmctl failed: %d", ep);
		return;
	}

	assert(addr >= region->vaddr);
	offset = addr - region->vaddr;

	/* Access is allowed; handle it. */
	if(retry) {
		result = map_pf(vmp, region, offset, wr, NULL, NULL, 0, &io);
		assert(result != SUSPEND);
	} else {
		struct pf_state state;
		state.ep = ep;
		state.vaddr = addr;
		state.err = err;
		result = map_pf(vmp, region, offset, wr, pf_cont,
			&state, sizeof(state), &io);
	}
	if (io)
		vmp->vm_major_page_fault++;
	else
		vmp->vm_minor_page_fault++;

	if(result == SUSPEND) {
		return;
	}

	if(result != OK) {
		printf("VM: pagefault: SIGSEGV %d pagefault not handled\n", ep);
		if((s=sys_kill(ep, SIGSEGV)) != OK)
			panic("sys_kill failed: %d", s);
		if((s=sys_vmctl(ep, VMCTL_CLEAR_PAGEFAULT, 0 /*unused*/)) != OK)
			panic("do_pagefaults: sys_vmctl failed: %d", ep);
		return;
	}

	pt_clearmapcache();

	/* Pagefault is handled, so now reactivate the process. */
	if((s=sys_vmctl(ep, VMCTL_CLEAR_PAGEFAULT, 0 /*unused*/)) != OK)
		panic("do_pagefaults: sys_vmctl failed: %d", ep);
}
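
/* Continuation of a suspended page fault: once the needed data is available,
 * retry the fault, unless the process has disappeared in the meantime.
 */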
static void pf_cont(struct vmproc *vmp, message *m,
	void *arg, void *statearg)
{
	struct pf_state *state = statearg;
	int p;
	if(vm_isokendpt(state->ep, &p) != OK) return;	/* signal */
	handle_pagefault(state->ep, state->vaddr, state->err, 1);
}
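
/* Continuation of a suspended handle_memory request: invoked with the VFS
 * reply, it either aborts on failure or resumes stepping through the range.
 */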
static void handle_memory_continue(struct vmproc *vmp, message *m,
	void *arg, void *statearg)
{
	int r;
	struct hm_state *state = statearg;
	assert(state);
	assert(state->caller != NONE);
	assert(state->valid == VALID);

	if(m->VMV_RESULT != OK) {
		printf("VM: handle_memory_continue: vfs request failed\n");
		handle_memory_final(state, m->VMV_RESULT);
		return;
	}

	r = handle_memory_step(state, TRUE /*retry*/);

	assert(state->valid == VALID);

	if(r == SUSPEND) {
		return;
	}

	assert(state->valid == VALID);

	handle_memory_final(state, r);
}
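
/* Report the final result of a handle_memory request to its caller: the
 * kernel is answered with VMCTL_MEMREQ_REPLY, other callers get an
 * asynchronous reply message. The state is then cleared so that any stale
 * reference to it fails fast.
 */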
static void handle_memory_final(struct hm_state *state, int result)
{
	int r;

	assert(state);
	assert(state->valid == VALID);

	if(state->caller == KERNEL) {
		if((r=sys_vmctl(state->requestor, VMCTL_MEMREQ_REPLY, result)) != OK)
			panic("handle_memory_final: sys_vmctl failed: %d", r);
	} else if(state->caller != NONE) {
		/* Send a reply message */
		message msg;
		memset(&msg, 0, sizeof(msg));
		msg.m_type = result;

		if(IS_VFS_FS_TRANSID(state->transid)) {
			assert(state->caller == VFS_PROC_NR);
			/* If a transaction ID was set, embed it in the reply type */
			msg.m_type = TRNS_ADD_ID(msg.m_type, state->transid);
		}

		if(asynsend3(state->caller, &msg, 0) != OK) {
			panic("handle_memory_final: asynsend3 failed");
		}
	}

	assert(state->valid == VALID);

	/* fail fast if anyone tries to access this state again */
	memset(state, 0, sizeof(*state));
}

/*===========================================================================*
 *				do_pagefaults				     *
 *===========================================================================*/
void do_pagefaults(message *m)
{
	handle_pagefault(m->m_source, m->VPF_ADDR, m->VPF_FLAGS, 0);
}
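
/* Make a memory range present right away, with no caller to notify later;
 * such a request must never suspend (no VFS involvement is allowed).
 */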
int handle_memory_once(struct vmproc *vmp, vir_bytes mem, vir_bytes len,
	int wrflag)
{
	int r;
	r = handle_memory_start(vmp, mem, len, wrflag, NONE, NONE, 0, 0);
	assert(r != SUSPEND);
	return r;
}
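
/* Page-align the requested range, set up the hm_state for it, and start
 * processing. Returns SUSPEND if the request continues asynchronously;
 * otherwise the final result is delivered to the caller before returning.
 */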
int handle_memory_start(struct vmproc *vmp, vir_bytes mem, vir_bytes len,
	int wrflag, endpoint_t caller, endpoint_t requestor, int transid,
	int vfs_avail)
{
	int r;
	struct hm_state state;
	vir_bytes o;

	if((o = mem % PAGE_SIZE)) {
		mem -= o;
		len += o;
	}

	len = roundup(len, PAGE_SIZE);

	state.vmp = vmp;
	state.mem = mem;
	state.len = len;
	state.wrflag = wrflag;
	state.requestor = requestor;
	state.caller = caller;
	state.transid = transid;
	state.valid = VALID;
	state.vfs_avail = vfs_avail;

	r = handle_memory_step(&state, FALSE /*retry*/);

	if(r == SUSPEND) {
		assert(caller != NONE);
		assert(vfs_avail);
	} else {
		handle_memory_final(&state, r);
	}

	return r;
}

/*===========================================================================*
 *				do_memory				     *
 *===========================================================================*/
void do_memory(void)
{
	endpoint_t who, who_s, requestor;
	vir_bytes mem, mem_s;
	vir_bytes len;
	int wrflag;

	while(1) {
		int p, r = OK;
		struct vmproc *vmp;

		r = sys_vmctl_get_memreq(&who, &mem, &len, &wrflag, &who_s,
			&mem_s, &requestor);

		switch(r) {
		case VMPTYPE_CHECK:
		{
			int transid = 0;
			int vfs_avail;

			if(vm_isokendpt(who, &p) != OK)
				panic("do_memory: bad endpoint: %d", who);
			vmp = &vmproc[p];

			assert(!IS_VFS_FS_TRANSID(transid));

			/* If VFS itself is the requestor, it is blocked on
			 * this request and cannot be asked to fetch pages.
			 */
			if(requestor == VFS_PROC_NR) vfs_avail = 0;
			else vfs_avail = 1;

			handle_memory_start(vmp, mem, len, wrflag,
				KERNEL, requestor, transid, vfs_avail);

			break;
		}

		default:
			return;
		}
	}
}
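
/* Walk the remaining range in hmstate one page at a time, making each page
 * present and, if requested, writable. May return SUSPEND when a page has to
 * be fetched through VFS and a continuation is allowed.
 */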
static int handle_memory_step(struct hm_state *hmstate, int retry)
{
	struct vir_region *region;
	vir_bytes offset, length, sublen;
	int r;

	/* The range must already be page-aligned (see handle_memory_start). */
	assert(hmstate);
	assert(hmstate->valid == VALID);
	assert(!(hmstate->mem % VM_PAGE_SIZE));
	assert(!(hmstate->len % VM_PAGE_SIZE));

	while(hmstate->len > 0) {
		if(!(region = map_lookup(hmstate->vmp, hmstate->mem, NULL))) {
#if VERBOSE
			map_printmap(hmstate->vmp);
			printf("VM: do_memory: memory doesn't exist\n");
#endif
			return EFAULT;
		} else if(!(region->flags & VR_WRITABLE) && hmstate->wrflag) {
#if VERBOSE
			printf("VM: do_memory: write to unwritable map\n");
#endif
			return EFAULT;
		}

		assert(region->vaddr <= hmstate->mem);
		assert(!(region->vaddr % VM_PAGE_SIZE));
		offset = hmstate->mem - region->vaddr;
		length = hmstate->len;
		if (offset + length > region->length)
			length = region->length - offset;

		/*
		 * Handle one page at a time. While it seems beneficial to
		 * handle multiple pages in one go, the opposite is true:
		 * map_handle_memory will handle one page at a time anyway, and
		 * if we give it the whole range multiple times, it will have
		 * to recheck pages it already handled. In addition, in order
		 * to handle one-shot pages, we need to know whether we are
		 * retrying a single page, and that is not possible if this is
		 * hidden in map_handle_memory.
		 */
		while (length > 0) {
			sublen = VM_PAGE_SIZE;

			assert(sublen <= length);
			assert(offset + sublen <= region->length);

			/*
			 * Upon the second try for this range, do not allow
			 * calling into VFS again. This prevents eternal loops
			 * in case the FS messes up, and allows one-shot pages
			 * to be mapped in on the second call.
			 */
			if((region->def_memtype == &mem_type_mappedfile &&
			    (!hmstate->vfs_avail || retry)) ||
			   hmstate->caller == NONE) {
				r = map_handle_memory(hmstate->vmp, region,
				    offset, sublen, hmstate->wrflag, NULL,
				    NULL, 0);
				assert(r != SUSPEND);
			} else {
				r = map_handle_memory(hmstate->vmp, region,
				    offset, sublen, hmstate->wrflag,
				    handle_memory_continue, hmstate,
				    sizeof(*hmstate));
			}

			if(r != OK) return r;

			hmstate->len -= sublen;
			hmstate->mem += sublen;

			offset += sublen;
			length -= sublen;
			retry = FALSE;
		}
	}

	return OK;
}